/* COPYRIGHT HEADER GOES HERE: No CopyRight Header String Passed During Model Conversion */ /* Command Line used: qnn-onnx-converter; act_bitwidth=8; act_quantizer=tf; act_quantizer_calibration=min-max; act_quantizer_schema=asymmetric; adjust_nms_features_dims=True; algorithms=[]; align_matmul_ranks=True; apply_masked_softmax=uncompressed; arch_checker=False; backend=None; batch=None; bias_bitwidth=8; calc_static_encodings=False; converter_op_package_lib=; copyright_file=None; custom_io=; custom_op_config_paths=None; debug=-1; defer_loading=False; define_symbol=None; disable_batchnorm_folding=False; disable_defer_loading=False; disable_node_validation=False; disable_qnn_op_config_validation=False; disable_relu_squashing=False; dry_run=None; dumpIR=False; dump_custom_io_config_template=; dump_encoding_json=False; dump_inferred_model=False; dump_qairt_io_config_yaml=; dump_qairt_quantizer_command=None; dump_value_info=False; enable_framework_trace=False; enable_match_gathernd=False; enable_match_topk=False; enable_per_row_quantized_bias=False; exclude_named_tensors=False; expand_gru_op_structure=True; expand_lstm_op_structure=False; expand_sparse_op_structure=False; export_format=cpp; extract_color_transform=True; float_bias_bitwidth=0; float_bias_bw=0; float_bitwidth=32; float_bw=32; float_fallback=False; force_prune_cast_ops=False; handle_gather_negative_indices=True; ignore_encodings=False; include_data_invariant_ops=False; inject_cast_for_gather=True; input_dim=[['text_ids', '1,128'], ['style_ttl', '1,50,256'], ['text_mask', '1,1,128']]; input_dtype=[]; input_encoding=[]; input_layout=[]; input_list=./calibration_data/text_encoder_input_list.txt; input_type=[]; keep_disconnected_nodes=False; keep_int64_inputs=False; keep_quant_nodes=False; keep_weights_quantized=False; match_caffe_ssd_to_tf=True; model_version=None; multi_time_steps_gru=False; multi_time_steps_lstm=False; no_simplification=False; op_package_lib=; out_names=['text_emb']; 
overwrite_model_prefix=False; pack_4_bit_weights=False; package_name=None; packed_masked_softmax_inputs=[]; packed_max_seq=1; param_quantizer=tf; param_quantizer_calibration=min-max; param_quantizer_schema=asymmetric; percentile_calibration_value=99.99; perform_axes_to_spatial_first_order=True; perform_layout_transformation=False; prepare_inputs_as_params=False; preprocess_roi_pool_inputs=True; preserve_io=[]; preserve_onnx_output_order=False; quantization_overrides=; quantizer_log=None; restrict_quantization_steps=[]; squash_box_decoder=True; unroll_gru_time_steps=True; unroll_lstm_time_steps=True; use_aimet_quantizer=False; use_convert_quantization_nodes=False; use_dynamic_16_bit_weights=False; use_native_dtype=False; use_native_input_files=False; use_native_output_files=False; use_per_channel_quantization=False; use_per_row_quantization=False; use_quantize_v2=False; validate_models=False; weights_bitwidth=8 */

#include "QnnOpDef.h"
#include "QnnModel.hpp"

// Flag to determine if Backend should do node validation for each opNode added
#define DO_GRAPH_NODE_VALIDATIONS 1

using namespace qnn_wrapper_api;

// Converter/SDK version string exported with default visibility so a loader of
// this generated model library can check compatibility.
const __attribute__((visibility("default"))) char* QNN_SDK_VERSION = "qaisw-v2.37.1.250807093845_124904";

extern "C" {

// Registers graph input "text_ids": INT32, shape [1, 128] (token ids).
// APP_WRITE => application fills the buffer at execute time; no quantization
// encoding attached (UNDEFINED, scale/offset are placeholders).
static ModelError_t addTensor_text_ids(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_text_ids[] = {1, 128};
  VALIDATE(model.addTensor("text_ids", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "text_ids",
                                 .type= QNN_TENSOR_TYPE_APP_WRITE,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_INT_32,
                                 .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                                                    QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                                    {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
                                 .rank= 2,
                                 .dimensions=dimensions_text_ids,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=nullptr,
                                                .dataSize=0}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Registers graph input "style_ttl": asymmetric uint8, scale 0.0067036,
// offset -148, declared here as [1, 256, 50]. NOTE(review): the converter
// command line declared this input as 1,50,256; the "style_ttl_ncf" Transpose
// node below (perm {0,2,1}) maps it back to [1, 50, 256].
static ModelError_t addTensor_style_ttl(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_style_ttl[] = {1, 256, 50};
  VALIDATE(model.addTensor("style_ttl", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "style_ttl",
                                 .type= QNN_TENSOR_TYPE_APP_WRITE,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
                                 .rank= 3,
                                 .dimensions=dimensions_style_ttl,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=nullptr,
                                                .dataSize=0}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Registers graph input "text_mask": asymmetric uint8 (scale 1/255, offset 0),
// declared as [1, 128, 1]; the "text_mask_ncf" Transpose below yields the
// [1, 1, 128] layout used by the attention-mask Reshape nodes.
static ModelError_t addTensor_text_mask(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_text_mask[] = {1, 128, 1};
  VALIDATE(model.addTensor("text_mask", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "text_mask",
                                 .type= QNN_TENSOR_TYPE_APP_WRITE,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}},
                                 .rank= 3,
                                 .dimensions=dimensions_text_mask,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=nullptr,
                                                .dataSize=0}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Transpose node "style_ttl_ncf": style_ttl [1,256,50] --perm {0,2,1}-->
// style_ttl_ncf [1,50,256]. Output keeps the input's quantization encoding
// (Transpose is data-movement only). The perm tensor is STATIC uint32[3]
// (dataSize = 3 * sizeof(uint32_t) = 12 bytes).
static ModelError_t addNode_style_ttl_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR style_ttl_ncf */
  uint32_t dimensions_style_ttl_ncf_perm[] = {3};
  uint32_t style_ttl_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params_style_ttl_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "style_ttl_ncf_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions_style_ttl_ncf_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)style_ttl_ncf_perm,
                           .dataSize=12}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs_style_ttl_ncf[] = {
    "style_ttl"
  };
  uint32_t dimensions_style_ttl_ncf[] = {1, 50, 256};
  Qnn_Tensor_t outputs_style_ttl_ncf[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "style_ttl_ncf",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
          .rank= 3,
          .dimensions=dimensions_style_ttl_ncf,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "style_ttl_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params_style_ttl_ncf, // Node Params
                         1, // Num Node Params
                         inputs_style_ttl_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs_style_ttl_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

static ModelError_t
// Reshape node: drops the leading batch dim of style_ttl_ncf, [1,50,256] ->
// [50,256], so the attention2 value projection can run as a rank-2
// FullyConnected. Quantization encoding is carried through unchanged.
addNode__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape */
  const char* inputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape[] = {
    "style_ttl_ncf"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape[] = {50, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
          .rank= 2,
          .dimensions=dimensions__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Same flattening Reshape ([1,50,256] -> [50,256]) feeding the attention1
// value-projection FullyConnected; mirrors the attention2 node above.
static ModelError_t addNode__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape */
  const char* inputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape[] = {
    "style_ttl_ncf"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape[] = {50, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
          .rank= 2,
          .dimensions=dimensions__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose node "text_mask_ncf": text_mask [1,128,1] --perm {0,2,1}-->
// [1,1,128]; perm is a STATIC uint32[3] tensor (12 bytes). Mask encoding
// (scale 1/255, offset 0) is preserved.
static ModelError_t addNode_text_mask_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR text_mask_ncf */
  uint32_t dimensions_text_mask_ncf_perm[] = {3};
  uint32_t text_mask_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params_text_mask_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "text_mask_ncf_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions_text_mask_ncf_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)text_mask_ncf_perm,
                           .dataSize=12}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs_text_mask_ncf[] = {
    "text_mask"
  };
  uint32_t dimensions_text_mask_ncf[] = {1, 1, 128};
  Qnn_Tensor_t outputs_text_mask_ncf[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "text_mask_ncf",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}},
          .rank= 3,
          .dimensions=dimensions_text_mask_ncf,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "text_mask_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params_text_mask_ncf, // Node Params
                         1, // Num Node Params
                         inputs_text_mask_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs_text_mask_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

static ModelError_t
// STATIC weight tensor: character-embedding table, uint8 [163, 256]
// (163 vocab entries x 256-dim embeddings). Data comes from the companion
// binary blob via the BINVARSTART/BINLEN linker-symbol macros.
addTensor_tts_ttl_text_encoder_text_embedder_char_embedder_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_text_embedder_char_embedder_weight[] = {163, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_text_embedder_char_embedder_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_text_embedder_char_embedder_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0039561334997416f, .offset= -130}}},
                                 .rank= 2,
                                 .dimensions=dimensions_tts_ttl_text_encoder_text_embedder_char_embedder_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_text_embedder_char_embedder_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_text_embedder_char_embedder_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Gather (axis 0): embedding lookup of text_ids [1,128] into the [163,256]
// table above -> [1,128,256]. Output inherits the table's quant encoding.
static ModelError_t addNode__text_encoder_text_embedder_char_embedder_Gather(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_text_embedder_char_embedder_Gather */
  Qnn_Param_t params__text_encoder_text_embedder_char_embedder_Gather[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_INT_32, {.int32Value = 0}}}}
  };
  const char* inputs__text_encoder_text_embedder_char_embedder_Gather[] = {
    "tts_ttl_text_encoder_text_embedder_char_embedder_weight",
    "text_ids"
  };
  uint32_t dimensions__text_encoder_text_embedder_char_embedder_Gather_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_text_embedder_char_embedder_Gather[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_text_embedder_char_embedder_Gather_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0039561334997416f, .offset= -130}}},
          .rank= 3,
          .dimensions=dimensions__text_encoder_text_embedder_char_embedder_Gather_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_text_embedder_char_embedder_Gather", // Node Name
                         "qti.aisw", // Package Name
                         "Gather", // Qnn Node Type
                         params__text_encoder_text_embedder_char_embedder_Gather, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_text_embedder_char_embedder_Gather, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_text_embedder_char_embedder_Gather, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ONNX Unsqueeze lowered to a QNN Reshape: text_mask_ncf [1,1,128] ->
// [1,1,1,128] (adds an axis for 4-D attention masking).
static ModelError_t addNode__text_encoder_attn_encoder_Unsqueeze(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_Unsqueeze */
  const char* inputs__text_encoder_attn_encoder_Unsqueeze[] = {
    "text_mask_ncf"
  };
  uint32_t dimensions__text_encoder_attn_encoder_Unsqueeze_output_0[] = {1, 1, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_Unsqueeze[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_Unsqueeze_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_attn_encoder_Unsqueeze_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_Unsqueeze", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_Unsqueeze, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_Unsqueeze, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Second Unsqueeze-as-Reshape on the same mask: [1,1,128] -> [1,1,128,1]
// (the transposed-mask counterpart of the node above).
static ModelError_t addNode__text_encoder_attn_encoder_Unsqueeze_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_Unsqueeze_1 */
  const char* inputs__text_encoder_attn_encoder_Unsqueeze_1[] = {
    "text_mask_ncf"
  };
  uint32_t dimensions__text_encoder_attn_encoder_Unsqueeze_1_output_0[] = {1, 1, 128, 1};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_Unsqueeze_1[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_Unsqueeze_1_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_attn_encoder_Unsqueeze_1_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_Unsqueeze_1", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_Unsqueeze_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_Unsqueeze_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose: embedding output [1,128,256] --perm {0,2,1}--> [1,256,128]
// (channel-first layout for the encoder). STATIC uint32[3] perm, 12 bytes.
static ModelError_t addNode__text_encoder_text_embedder_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_text_embedder_Transpose */
  uint32_t dimensions__text_encoder_text_embedder_Transpose_perm[] = {3};
  uint32_t _text_encoder_text_embedder_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_text_embedder_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_text_encoder_text_embedder_Transpose_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__text_encoder_text_embedder_Transpose_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_text_encoder_text_embedder_Transpose_perm,
                           .dataSize=12}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_text_embedder_Transpose[] = {
    "_text_encoder_text_embedder_char_embedder_Gather_output_0"
  };
  uint32_t dimensions__text_encoder_text_embedder_Transpose_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_text_embedder_Transpose[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_text_embedder_Transpose_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0039561334997416f, .offset= -130}}},
          .rank= 3,
          .dimensions=dimensions__text_encoder_text_embedder_Transpose_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_text_embedder_Transpose", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_text_embedder_Transpose, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_text_embedder_Transpose, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_text_embedder_Transpose, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// STATIC weight: uint8 [256, 256] matrix for the attention1 value projection
// (consumed by the FullyConnected node below); data from the binary blob.
static ModelError_t addTensor_onnx__MatMul_3680(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3680[] = {256, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3680", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "onnx__MatMul_3680",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0067435698583722f, .offset= -126}}},
                                 .rank= 2,
                                 .dimensions=dimensions_onnx__MatMul_3680,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3680),
                                                .dataSize=BINLEN(onnx__MatMul_3680)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

static ModelError_t
addTensor_tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007403795607388f, .offset= -112}}}, .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_W_value_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_W_value_linear_MatMul */ const char* inputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul[] = { "_speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3680", "tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0_fc[] = {50, 256}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, 
{.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060068476013839f, .offset= -138}}}, .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_W_value_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape */ const char* inputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape[] = { "_speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0_fc" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0[] = {1, 50, 256}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060068476013839f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Unsqueeze_6(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Unsqueeze_6 */ const char* inputs__speech_prompted_text_encoder_attention1_Unsqueeze_6[] = { "text_mask" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_6_output_0[] = {1, 1, 128, 1}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Unsqueeze_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Unsqueeze_6_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_6_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Unsqueeze_6", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Unsqueeze_6, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Unsqueeze_6, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3684(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3684[] = {256, 256}; VALIDATE(model.addTensor("onnx__MatMul_3684", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3684", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0076378448866308f, .offset= -131}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3684, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3684), .dataSize=BINLEN(onnx__MatMul_3684)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias(QnnModel& 
/* Completion of the attention2 value-linear bias addTensor: shape {256}, u8 asymmetric
   (scale=0.0005847453139722, offset=-137), data from the model binary via BINVARSTART/BINLEN. */
model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005847453139722f, .offset= -137}}}, .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* FullyConnected node for attention2's value projection: inputs are the pre-reshaped
   activations, weights onnx__MatMul_3684 and the bias above; output is {50,256} u8
   (scale=0.0105030238628387, offset=-140). */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_W_value_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_W_value_linear_MatMul */
const char* inputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul[] = { "_speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3684", "tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0_fc[] = {50, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0105030238628387f, .offset= -140}}}, .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention2_W_value_linear_MatMul", // Node Name
  "qti.aisw", // Package Name
  "FullyConnected", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul, // Input Tensor Names
  3, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Reshapes the FC result {50,256} back to {1,50,256}
   (definition continues on the next original line). */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape */
const char* inputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape[] = { "_speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0_fc" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0[] = {1, 50, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0", .type=
/* Completion of the post-reshape node: rank-3 u8 output {1,50,256} sharing the FC
   encoding (scale=0.0105030238628387, offset=-140), registered as a Reshape. */
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0105030238628387f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* ElementWiseBinary (operation=13) of the two unsqueezed attn-encoder mask tensors,
   producing a {1,1,128,128} u8 mask product (scale=0.0039215688593686, offset=0). */
static ModelError_t addNode__text_encoder_attn_encoder_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_Mul */
Qnn_Param_t params__text_encoder_attn_encoder_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_attn_encoder_Mul[] = { "_text_encoder_attn_encoder_Unsqueeze_output_0", "_text_encoder_attn_encoder_Unsqueeze_1_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_Mul_output_0[] = {1, 1, 128, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_text_encoder_attn_encoder_Mul", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__text_encoder_attn_encoder_Mul, // Node Params
  1, // Num Node Params
  inputs__text_encoder_attn_encoder_Mul, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__text_encoder_attn_encoder_Mul, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* ElementWiseBinary (operation=13) applying text_mask_ncf to the transposed text
   embedding; {1,256,128} u8 output (definition continues on the next original line). */
static ModelError_t addNode__text_encoder_text_embedder_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_text_embedder_Mul */
Qnn_Param_t params__text_encoder_text_embedder_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_text_embedder_Mul[] = { "_text_encoder_text_embedder_Transpose_output_0", "text_mask_ncf" };
uint32_t dimensions__text_encoder_text_embedder_Mul_output_0[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_text_embedder_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_text_embedder_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018772240728140f, .offset= -143}}}, .rank= 3,
/* Completion of the text_embedder Mul node: output tensor tail + node registration. */
.dimensions=dimensions__text_encoder_text_embedder_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_text_encoder_text_embedder_Mul", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__text_encoder_text_embedder_Mul, // Node Params
  1, // Num Node Params
  inputs__text_encoder_text_embedder_Mul, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__text_encoder_text_embedder_Mul, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Transpose {1,256,128} -> {1,128,256} with static perm {0,2,1}; the perm tensor is
   passed as a u32 tensor param (3 elements, 12 bytes). Output keeps the input encoding
   (scale=0.0018772240728140, offset=-143). */
static ModelError_t addNode__text_encoder_text_embedder_Mul_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_text_embedder_Mul_output_0_nfc */
uint32_t dimensions__text_encoder_text_embedder_Mul_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_text_embedder_Mul_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_text_embedder_Mul_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_text_embedder_Mul_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_text_embedder_Mul_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_text_embedder_Mul_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_text_embedder_Mul_output_0_nfc[] = { "_text_encoder_text_embedder_Mul_output_0" };
uint32_t dimensions__text_encoder_text_embedder_Mul_output_0_nfc[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_text_embedder_Mul_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_text_embedder_Mul_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018772240728140f, .offset= -143}}}, .rank= 3, .dimensions=dimensions__text_encoder_text_embedder_Mul_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_text_encoder_text_embedder_Mul_output_0_nfc", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__text_encoder_text_embedder_Mul_output_0_nfc, // Node Params
  1, // Num Node Params
  inputs__text_encoder_text_embedder_Mul_output_0_nfc, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__text_encoder_text_embedder_Mul_output_0_nfc, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Split of attention1's value-linear output at index 128 along axis 2
   (definition continues on the next original line). */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_Split_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Split_2 */
uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_2_split_index[] = {1};
uint32_t _speech_prompted_text_encoder_attention1_Split_2_split_index[] = {128};
Qnn_Param_t params__speech_prompted_text_encoder_attention1_Split_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index",
/* Continuation of attention1 Split_2: static u32 split_index tensor ({128}, 4 bytes)
   plus scalar axis=2; splits {1,50,256} into two {1,50,128} halves sharing the input
   encoding (scale=0.0060068476013839, offset=-138). */
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_speech_prompted_text_encoder_attention1_Split_2_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__speech_prompted_text_encoder_attention1_Split_2[] = { "_speech_prompted_text_encoder_attention1_W_value_linear_Add_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_2_output_0[] = {1, 50, 128};
uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_2_output_1[] = {1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Split_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060068476013839f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060068476013839f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention1_Split_2", // Node Name
  "qti.aisw", // Package Name
  "Split", // Qnn Node Type
  params__speech_prompted_text_encoder_attention1_Split_2, // Node Params
  2, // Num Node Params
  inputs__speech_prompted_text_encoder_attention1_Split_2, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention1_Split_2, // Output Tensors
  2// Num Output Tensors
  ), err);
return err; }
/* Registers the static scalar-like constant tensor Constant_11_output_0: shape {1},
   u8 quantized (definition continues on the next original line). */
static ModelError_t addTensor__speech_prompted_text_encoder_attention1_Constant_11_output_0(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__speech_prompted_text_encoder_attention1_Constant_11_output_0[] = {1};
VALIDATE(model.addTensor("_speech_prompted_text_encoder_attention1_Constant_11_output_0", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Constant_11_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
/* Completion of the Constant_11 addTensor: scale=0.0000003921568634, offset=0, rank 1,
   bytes from the model binary via BINVARSTART/BINLEN. */
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Constant_11_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_speech_prompted_text_encoder_attention1_Constant_11_output_0), .dataSize=BINLEN(_speech_prompted_text_encoder_attention1_Constant_11_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* ElementWiseBinary (operation=3) comparing the unsqueezed mask against Constant_11;
   output is a {1,1,128,1} BOOL_8 tensor named ..._attention2_Cast_output_0. */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_Equal(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Equal */
Qnn_Param_t params__speech_prompted_text_encoder_attention1_Equal[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__speech_prompted_text_encoder_attention1_Equal[] = { "_speech_prompted_text_encoder_attention1_Unsqueeze_6_output_0", "_speech_prompted_text_encoder_attention1_Constant_11_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_Cast_output_0[] = {1, 1, 128, 1};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Equal[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Cast_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_BOOL_8, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Cast_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention1_Equal", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__speech_prompted_text_encoder_attention1_Equal, // Node Params
  1, // Num Node Params
  inputs__speech_prompted_text_encoder_attention1_Equal, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention1_Equal, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Split of attention2's value-linear output at index 128 along axis 2 — mirrors
   attention1_Split_2 (definition continues on the next original line). */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Split_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Split_2 */
uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_2_split_index[] = {1};
uint32_t _speech_prompted_text_encoder_attention2_Split_2_split_index[] = {128};
Qnn_Param_t params__speech_prompted_text_encoder_attention2_Split_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_speech_prompted_text_encoder_attention2_Split_2_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis",
/* Continuation of attention2 Split_2: scalar axis=2 param, single input, and the two
   {1,50,128} u8 output halves (scale=0.0105030238628387, offset=-140). */
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__speech_prompted_text_encoder_attention2_Split_2[] = { "_speech_prompted_text_encoder_attention2_W_value_linear_Add_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_2_output_0[] = {1, 50, 128};
uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_2_output_1[] = {1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Split_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0105030238628387f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0105030238628387f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention2_Split_2", // Node Name
  "qti.aisw", // Package Name
  "Split", // Qnn Node Type
  params__speech_prompted_text_encoder_attention2_Split_2, // Node Params
  2, // Num Node Params
  inputs__speech_prompted_text_encoder_attention2_Split_2, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention2_Split_2, // Output Tensors
  2// Num Output Tensors
  ), err);
return err; }
/* ElementWiseBinary (operation=3) comparing the attn-encoder mask product against
   Constant_11; output is a {1,1,128,128} BOOL_8 tensor
   (node registration continues on the next original line). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Equal(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Equal */
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Equal[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_Equal[] = { "_text_encoder_attn_encoder_Mul_output_0", "_speech_prompted_text_encoder_attention1_Constant_11_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Cast_5_output_0[] = {1, 1, 128, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Equal[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Cast_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_BOOL_8, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Cast_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
/* Node registration for attn_layers_0_Equal (tensors declared on the previous line). */
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_text_encoder_attn_encoder_attn_layers_0_Equal", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__text_encoder_attn_encoder_attn_layers_0_Equal, // Node Params
  1, // Num Node Params
  inputs__text_encoder_attn_encoder_attn_layers_0_Equal, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__text_encoder_attn_encoder_attn_layers_0_Equal, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* ElementWiseBinary (operation=13) applying text_mask to the transposed embedding,
   producing {1,128,256} u8 (scale=0.0018772240728140, offset=-143) feeding convnext_0. */
static ModelError_t addNode__text_encoder_convnext_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_Mul */
Qnn_Param_t params__text_encoder_convnext_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_convnext_convnext_0_Mul[] = { "_text_encoder_text_embedder_Mul_output_0_nfc", "text_mask" };
uint32_t dimensions__text_encoder_convnext_convnext_0_Mul_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018772240728140f, .offset= -143}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_text_encoder_convnext_convnext_0_Mul", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__text_encoder_convnext_convnext_0_Mul, // Node Params
  1, // Num Node Params
  inputs__text_encoder_convnext_convnext_0_Mul, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__text_encoder_convnext_convnext_0_Mul, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Unsqueeze (emitted as Reshape) of attention1 Split_2_output_0: {1,50,128} ->
   {1,1,50,128}, keeping the split's encoding (scale=0.0060068476013839, offset=-138).
   Node registration finishes on the next original line. */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_Unsqueeze_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Unsqueeze_4 */
const char* inputs__speech_prompted_text_encoder_attention1_Unsqueeze_4[] = { "_speech_prompted_text_encoder_attention1_Split_2_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_4_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Unsqueeze_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Unsqueeze_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060068476013839f, .offset= -138}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention1_Unsqueeze_4", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__speech_prompted_text_encoder_attention1_Unsqueeze_4, // Input Tensor Names
  1, // Num Input Tensor Names
/* Tail of the attention1 Unsqueeze_4 node registration. */
outputs__speech_prompted_text_encoder_attention1_Unsqueeze_4, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Unsqueeze (Reshape) of attention1 Split_2_output_1: {1,50,128} -> {1,1,50,128},
   same encoding as the split halves (scale=0.0060068476013839, offset=-138). */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_Unsqueeze_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Unsqueeze_5 */
const char* inputs__speech_prompted_text_encoder_attention1_Unsqueeze_5[] = { "_speech_prompted_text_encoder_attention1_Split_2_output_1" };
uint32_t dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_5_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Unsqueeze_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Unsqueeze_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060068476013839f, .offset= -138}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention1_Unsqueeze_5", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__speech_prompted_text_encoder_attention1_Unsqueeze_5, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention1_Unsqueeze_5, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Unsqueeze (Reshape) of attention2 Split_2_output_0: {1,50,128} -> {1,1,50,128},
   keeping attention2's value encoding (scale=0.0105030238628387, offset=-140). */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Unsqueeze_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Unsqueeze_4 */
const char* inputs__speech_prompted_text_encoder_attention2_Unsqueeze_4[] = { "_speech_prompted_text_encoder_attention2_Split_2_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_4_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Unsqueeze_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Unsqueeze_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0105030238628387f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention2_Unsqueeze_4", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__speech_prompted_text_encoder_attention2_Unsqueeze_4, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention2_Unsqueeze_4, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Unsqueeze (Reshape) of attention2 Split_2_output_1
   (definition continues on the next original line). */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Unsqueeze_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Unsqueeze_5 */
const char* inputs__speech_prompted_text_encoder_attention2_Unsqueeze_5[] = { "_speech_prompted_text_encoder_attention2_Split_2_output_1" };
uint32_t
/* Completion of attention2 Unsqueeze_5: {1,1,50,128} u8 output
   (scale=0.0105030238628387, offset=-140) registered as a Reshape. */
dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_5_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Unsqueeze_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Unsqueeze_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0105030238628387f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention2_Unsqueeze_5", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__speech_prompted_text_encoder_attention2_Unsqueeze_5, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention2_Unsqueeze_5, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Concat of attention1's two unsqueezed halves along axis 0 -> {2,1,50,128} u8
   (scale=0.0060068476013839, offset=-138). */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_Concat_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Concat_2 */
Qnn_Param_t params__speech_prompted_text_encoder_attention1_Concat_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__speech_prompted_text_encoder_attention1_Concat_2[] = { "_speech_prompted_text_encoder_attention1_Unsqueeze_4_output_0", "_speech_prompted_text_encoder_attention1_Unsqueeze_5_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention1_Concat_2_output_0[] = {2, 1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Concat_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Concat_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060068476013839f, .offset= -138}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Concat_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention1_Concat_2", // Node Name
  "qti.aisw", // Package Name
  "Concat", // Qnn Node Type
  params__speech_prompted_text_encoder_attention1_Concat_2, // Node Params
  1, // Num Node Params
  inputs__speech_prompted_text_encoder_attention1_Concat_2, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention1_Concat_2, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Concat of attention2's two unsqueezed halves along axis 0 -> {2,1,50,128} u8
   (scale=0.0105030238628387, offset=-140); mirrors attention1_Concat_2. */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Concat_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Concat_2 */
Qnn_Param_t params__speech_prompted_text_encoder_attention2_Concat_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__speech_prompted_text_encoder_attention2_Concat_2[] = { "_speech_prompted_text_encoder_attention2_Unsqueeze_4_output_0", "_speech_prompted_text_encoder_attention2_Unsqueeze_5_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_Concat_2_output_0[] = {2, 1, 50, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Concat_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Concat_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0105030238628387f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Concat_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_speech_prompted_text_encoder_attention2_Concat_2", // Node Name
  "qti.aisw", // Package Name
  "Concat", // Qnn Node Type
  params__speech_prompted_text_encoder_attention2_Concat_2, // Node Params
  1, // Num Node Params
  inputs__speech_prompted_text_encoder_attention2_Concat_2, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__speech_prompted_text_encoder_attention2_Concat_2, // Output Tensors
  1// Num Output Tensors
  ), err);
return err; }
/* Pad node for convnext_0's depthwise conv: pad_amount is a {3,2} table
   {0,0, 2,2, 0,0} — 2 on each side of the middle axis
   (definition continues past the visible chunk). */
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Pad */
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__text_encoder_convnext_convnext_0_dwconv_Pad[]
// Continuation of addNode__text_encoder_convnext_convnext_0_dwconv_Pad: the parameter
// array initializer (declaration is on the preceding line).
= {
  // "pad_amount": static 3x2 uint32 tensor {0,0, 2,2, 0,0} — pad the middle axis only.
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="pad_amount",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_convnext_convnext_0_dwconv_Pad_pad_amount",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 2,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_pad_amount,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Pad_pad_amount,
                      .dataSize=24}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
  // "scheme": pad-mode selector, value 3 — see QnnOpDef.h for the enum meaning
  // (NOTE(review): semantics not visible here; verify against the Pad op definition).
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme",
   {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Pad[] = {
  "_text_encoder_convnext_convnext_0_Mul_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_output_0[] = {1, 132, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_dwconv_Pad[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Pad_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0018772240728140f, .offset= -143}}},
      .rank= 3,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Pad", // Node Name
                       "qti.aisw", // Package Name
                       "Pad", // Qnn Node Type
                       params__text_encoder_convnext_convnext_0_dwconv_Pad, // Node Params
                       2, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Pad, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Pad, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose with perm {0,2,1}: 1x132x256 -> 1x256x132, moving features in front of
// the padded axis before the conv-as-2d rewrite below.
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf */
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="perm",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm,
                      .dataSize=12}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Pad_output_0"
};
uint32_t
dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 256, 132};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0018772240728140f, .offset= -143}}},
      .rank= 3,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Reshape 1x256x132 -> 1x256x1x132: inserts a unit height axis so the 1-D depthwise
// conv can be run as a 2-D conv (continues on the next physical lines).
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d */
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 256, 1, 132};
Qnn_Tensor_t
// Continuation of addNode__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d:
// output tensor definition and node registration.
outputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0018772240728140f, .offset= -143}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose with perm {0,2,3,1}: 1x256x1x132 -> 1x1x132x256, i.e. channel-first to
// channel-last layout ahead of the DepthWiseConv2d node.
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="perm",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name=
       "_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 132, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0018772240728140f, .offset= -143}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node
Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static depthwise-conv weight tensor (1x5x1x256 u8), payload supplied by the
// BINVARSTART/BINLEN binary-blob macros at link time.
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight[] = {1, 5, 1, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight", // Tensor Name
                         (Qnn_Tensor_t) {
                           .version= QNN_TENSOR_VERSION_2,
                           {.v2= {
                             .id=0,
                             .name= "tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight",
                             .type= QNN_TENSOR_TYPE_STATIC,
                             .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                             .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                {.scaleOffsetEncoding= {.scale= 0.0048351162113249f, .offset= -106}}},
                             .rank= 4,
                             .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight),
                                            .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                              .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}}
                         ), err);
return err;
}

// Static depthwise-conv bias tensor (256 u8 values from the binary blob;
// initializer continues on the next physical lines).
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias", // Tensor Name
                         (Qnn_Tensor_t) {
                           .version= QNN_TENSOR_VERSION_2,
// Continuation of addTensor_tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias.
                           {.v2= {
                             .id=0,
                             .name= "tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias",
                             .type= QNN_TENSOR_TYPE_STATIC,
                             .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                             .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                {.scaleOffsetEncoding= {.scale= 0.0003789643233176f, .offset= -162}}},
                             .rank= 1,
                             .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias),
                                            .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                              .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}}
                         ), err);
return err;
}

// The DepthWiseConv2d node itself: dilation {1,1}, pad {0,0,0,0} (padding was done
// by the explicit Pad node above), stride {1,1}; consumes the NHWC activation plus
// the static weight/bias tensors and produces a 1x1x128x256 u8 intermediate.
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Conv_2d */
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_2d_stride[] = {2};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_0_dwconv_Conv_2d[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="dilation",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation,
                      .dataSize=8}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="pad_amount",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 2,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="stride",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_2d_stride",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_2d_stride,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Conv_2d_stride,
                      .dataSize=8}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Conv_2d[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
  "tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight",
  "tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_dwconv_Conv_2d[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0022551277652383f, .offset= -151}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "DepthWiseConv2d", // Qnn Node Type
                       params__text_encoder_convnext_convnext_0_dwconv_Conv_2d, // Node Params
                       3, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose NHWC conv output back toward channel-first layout
// (body continues on the next physical lines).
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err =
// Continuation of addNode__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw:
// Transpose with perm {0,3,1,2}: 1x1x128x256 -> 1x256x1x128 (NHWC -> NCHW).
MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="perm",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale=
                          0.0022551277652383f, .offset= -151}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Reshape 1x256x1x128 -> 1x256x128: drops the unit height axis that was inserted
// for the 2-D conv, recovering the original rank-3 conv output.
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Conv_intermediate */
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_output_0[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0022551277652383f, .offset= -151}}},
      .rank= 3,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Conv_intermediate", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose with perm {0,2,1}: 1x256x128 -> 1x128x256, back to feature-last layout
// (body continues on the next physical lines).
static ModelError_t addNode__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc */
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="perm",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm,
                      .dataSize=12}},
       .isDynamicDimensions= nullptr,
       .sparseParams= {
// Continuation of addNode__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc.
                        QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Conv_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0022551277652383f, .offset= -151}}},
      .rank= 3,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// ElementWiseBinary applying the graph-input "text_mask" to the conv output.
// operation=13 encodes the binary op implementing the ONNX Mul (exact enum mapping
// lives in QnnOpDef.h — NOTE(review): verify there rather than trusting the name).
static ModelError_t addNode__text_encoder_convnext_convnext_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_0_Mul_1 */
Qnn_Param_t params__text_encoder_convnext_convnext_0_Mul_1[] = {
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
   {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__text_encoder_convnext_convnext_0_Mul_1[] = {
  "_text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc",
  "text_mask"
};
uint32_t dimensions__text_encoder_convnext_convnext_0_norm_Transpose_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_Mul_1[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_0_norm_Transpose_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0022551277652383f, .offset= -151}}},
      .rank= 3,
      .dimensions=dimensions__text_encoder_convnext_convnext_0_norm_Transpose_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_0_Mul_1", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_convnext_convnext_0_Mul_1, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_convnext_convnext_0_Mul_1, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_0_Mul_1, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static LayerNorm gamma tensor (256 u8 values from the binary blob).
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight", // Tensor Name
                         (Qnn_Tensor_t) {
                           .version= QNN_TENSOR_VERSION_2,
                           {.v2= {
                             .id=0,
                             .name= "tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight",
                             .type= QNN_TENSOR_TYPE_STATIC,
                             .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                             .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                {.scaleOffsetEncoding= {.scale= 0.0028256347868592f, .offset= 0}}},
                             .rank= 1,
                             .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight),
                                            .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                              .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}}
                         ), err);
return err;
}

// Static LayerNorm beta tensor (256 u8 values; initializer continues on the next
// physical lines).
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias", // Tensor Name
                         (Qnn_Tensor_t) {
                           .version= QNN_TENSOR_VERSION_2,
                           {.v2= {
                             .id=0,
                             .name= "tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias",
                             .type= QNN_TENSOR_TYPE_STATIC,
                             .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                             .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                {.scaleOffsetEncoding= {.scale= 0.0017984028672799f, .offset= -103}}},
                             .rank= 1,
                             .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias),
                                            .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                              .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_norm_norm_LayerNormalization */ uint32_t dimensions__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _text_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization[] = { "_text_encoder_convnext_convnext_0_norm_Transpose_output_0", "tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight", "tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0[] = {1, 128, 256}; Qnn_Tensor_t 
outputs__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0294354781508446f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_text_encoder_convnext_convnext_0_norm_Transpose_1_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0294354781508446f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [generated] Reshape node: lifts the channel-first activation (1,256,128) to
 * a rank-4 tensor (1,256,1,128) so the following 1x1 pointwise conv can run as
 * a QNN Conv2d. Reshape takes no params (nullptr/0); quantization encoding is
 * carried through unchanged (scale=0.0294354..., offset=-130). */
static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0294354781508446f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 
256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0294354781508446f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight[] = {1, 1, 256, 1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0048953001387417f, .offset= -126}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010352159151807f, .offset= -255}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_convnext_convnext_0_pwconv1_Conv_2d */
/* [generated] Conv2d parameter set for the pwconv1 1x1 convolution:
 * dilation=[1,1], pad_amount=[[0,0],[0,0]] (rank-2, dataSize=16), stride=[1,1],
 * group=1, reuse_sparse_indices=false — i.e. a plain dense pointwise conv. */
uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_2d[] = { "_text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight", "tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
.id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0443536192178726f, .offset= -195}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_convnext_convnext_0_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0443536192178726f, .offset= -195}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Node 
Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate[] = { "_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0443536192178726f, .offset= -195}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [generated] ElementWiseNeuron node applied to the pwconv1 output
 * (1,1024,128): scalar param operation=1 selects an activation from the QNN
 * ElementWiseNeuron operation enum. NOTE(review): the output tensor is named
 * "_text_encoder_convnext_convnext_0_act_Mul_1_output_0", suggesting this is
 * the converter's fused lowering of the ConvNeXt activation (GELU-style
 * Mul pattern in the ONNX graph) — confirm operation=1's meaning against the
 * QNN ElementWiseNeuron op definition. Output is requantized to
 * scale=0.0111149..., offset=-15. */
static ModelError_t addNode__elementwiseneuron_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_4 */ Qnn_Param_t params__elementwiseneuron_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_4[] = { "_text_encoder_convnext_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_0_act_Mul_1_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__elementwiseneuron_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0111149195581675f, .offset= -15}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_4", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_4, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_4, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_4, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [generated] Reshape node: lifts the activated tensor (1,1024,128) to rank-4
 * (1,1024,1,128) so pwconv2 (the 1024 -> 256 projection) can run as Conv2d. */
static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0111149195581675f, .offset= -15}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t
_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0111149195581675f, .offset= -15}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr,
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [generated] STATIC weight tensor for the second pointwise conv (pwconv2):
 * HWIO layout {1,1,1024,256} — 1x1 kernel projecting 1024 -> 256 channels;
 * uint8 scale/offset quantized (scale=0.0055673..., offset=-146), bytes from
 * the weight blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight[] = {1, 1, 1024, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055673951283097f, .offset= -146}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* [generated] STATIC bias tensor for pwconv2: 256 elements, uint8
 * scale/offset quantized (scale=0.0006576..., offset=-120), bytes from the
 * weight blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006576364976354f, .offset= -120}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* [generated] Conv2d node for pwconv2: dilation=[1,1], zero padding,
 * stride=[1,1], group=1, reuse_sparse_indices=false; consumes the NHWC
 * activation plus the pwconv2 weight/bias tensors above. */
static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_2d[] = { "_text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight", "tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203759241849184f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_convnext_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203759241849184f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate[] = { 
"_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203759241849184f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_text_encoder_convnext_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203759241849184f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_0_gamma[] = {1, 1, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004740175500046f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_Mul_2 */ Qnn_Param_t params__text_encoder_convnext_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) 
{QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_0_Mul_2[] = { "tts_ttl_text_encoder_convnext_convnext_0_gamma", "_text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__text_encoder_convnext_convnext_0_Mul_2_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014720575418323f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_Add */ Qnn_Param_t params__text_encoder_convnext_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* 
inputs__text_encoder_convnext_convnext_0_Add[] = { "_text_encoder_convnext_convnext_0_Mul_output_0", "_text_encoder_convnext_convnext_0_Mul_2_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_0_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022328870836645f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_0_Add, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_0_Mul_3 */ Qnn_Param_t params__text_encoder_convnext_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_0_Mul_3[] = { "_text_encoder_convnext_convnext_0_Add_output_0", 
"text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_0_Mul_3_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022328870836645f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_Mul */ Qnn_Param_t params__text_encoder_convnext_convnext_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_1_Mul[] = { "_text_encoder_convnext_convnext_0_Mul_3_output_0", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_1_Mul_output_0[] = {1, 128, 256}; Qnn_Tensor_t 
outputs__text_encoder_convnext_convnext_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022328870836645f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_1_Mul, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Pad */ uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _text_encoder_convnext_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__text_encoder_convnext_convnext_1_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, 
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__text_encoder_convnext_convnext_1_dwconv_Pad[] = { "_text_encoder_convnext_convnext_1_Mul_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_output_0[] = {1, 132, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022328870836645f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_convnext_convnext_1_dwconv_Pad, // Node Params 2, // Num Node Params 
inputs__text_encoder_convnext_convnext_1_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf */ uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { "_text_encoder_convnext_convnext_1_dwconv_Pad_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 256, 132}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022328870836645f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = {1, 256, 1, 132}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022328870836645f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 
0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 132, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022328870836645f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names 
/*
 * NOTE(review): auto-generated QNN graph-construction code (qnn-onnx-converter,
 * export_format=cpp) for the ConvNeXt block #1 of the TTS text encoder.
 * The code below is token-identical to the generated output; only comments were
 * added and the generator's one-item-per-line breaks were restored. Do NOT
 * hand-edit tensor names, dimensions, or quantization scale/offset constants:
 * they must stay in sync with the serialized weight blob referenced via
 * BINVARSTART/BINLEN and with the encodings chosen at conversion time.
 */

/* Tail of addNode__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc
 * (the head of this definition is above this chunk): finishes the addNode()
 * call with its single output tensor and returns the accumulated error. */
outputs__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers the static depthwise-conv weight tensor, dims {1,5,1,256}
 * (kernel 1x5, depth multiplier 1, 256 channels), quantized uint8 with
 * scale/offset fixed at conversion; raw bytes come from the model binary. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight[] = {1, 5, 1, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0043130395933986f, .offset= -107}}},
                                 .rank= 4,
                                 .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 // Weight payload is resolved from the companion .bin at link/load time.
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the static depthwise-conv bias tensor, dims {256}, quantized uint8. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0002949726185761f, .offset= -162}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* DepthWiseConv2d over the NHWC-reshaped input {1,1,128,256}: the original
 * ONNX 1-D Conv was lowered to a 2-D conv with H=1, stride 1x1, no dilation,
 * no padding. Inputs: activation, static weight, static bias (added above). */
static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Conv_2d */
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_convnext_convnext_1_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_dwconv_Conv_2d[] = {
    // dilation = {1,1}; dataSize 8 = 2 * sizeof(uint32_t)
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    // pad_amount = {{0,0},{0,0}}; dataSize 16 = 4 * sizeof(uint32_t)
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 2,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    // stride = {1,1}
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_2d_stride",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_2d_stride,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Conv_2d_stride,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_dwconv_Conv_2d[] = {
    "_text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight",
    "tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Conv_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0014085628790781f, .offset= -154}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_dwconv_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "DepthWiseConv2d", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_dwconv_Conv_2d, // Node Params
                         3, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_dwconv_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_dwconv_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose NHWC {1,1,128,256} -> NCHW {1,256,1,128} (perm {0,3,1,2}),
 * part of the NHWC round-trip the converter inserts around conv ops. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = {
    "_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          // Layout-only op: quantization encoding matches its input exactly.
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0014085628790781f, .offset= -154}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape {1,256,1,128} -> {1,256,128}: drops the dummy H=1 axis to restore
 * the original 1-D conv output shape. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Conv_intermediate */
  const char* inputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate[] = {
    "_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0014085628790781f, .offset= -154}}},
          .rank= 3,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_dwconv_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose {1,256,128} -> {1,128,256} (perm {0,2,1}): channel-last layout
 * ("nfc") expected by the following element-wise mask multiply. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc */
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm,
                          .dataSize=12}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = {
    "_text_encoder_convnext_convnext_1_dwconv_Conv_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0014085628790781f, .offset= -154}}},
          .rank= 3,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ElementWiseBinary on (conv output, graph input "text_mask"); operation=13
 * — presumably the multiply opcode given the original node name "Mul_1";
 * verify against QnnOpDef.h. Output feeds the LayerNorm below. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_Mul_1 */
  Qnn_Param_t params__text_encoder_convnext_convnext_1_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_Mul_1[] = {
    "_text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc",
    "text_mask"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_norm_Transpose_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_Mul_1[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_norm_Transpose_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0014085628790781f, .offset= -154}}},
          .rank= 3,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_norm_Transpose_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_Mul_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_Mul_1, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_Mul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_Mul_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Static LayerNorm gamma tensor, dims {256}, quantized uint8 (offset 0). */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0028470007237047f, .offset= 0}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Static LayerNorm beta tensor, dims {256}, quantized uint8. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0024077277630568f, .offset= -127}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* LayerNorm over axis 2 (the 256-wide feature axis of {1,128,256}),
 * epsilon 1e-6, with the gamma/beta tensors registered above. */
static ModelError_t
addNode__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_norm_norm_LayerNormalization */
  uint32_t dimensions__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _text_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes,
                          .dataSize=4}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization[] = {
    "_text_encoder_convnext_convnext_1_norm_Transpose_output_0",
    "tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight",
    "tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_norm_Transpose_1_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0226508155465126f, .offset= -136}}},
          .rank= 3,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_norm_norm_LayerNormalization", // Node Name
                         "qti.aisw", // Package Name
                         "LayerNorm", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose {1,128,256} -> {1,256,128} (perm {0,2,1}): back to channel-first
 * ("ncf") ahead of the pointwise conv. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm,
                          .dataSize=12}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = {
    "_text_encoder_convnext_convnext_1_norm_Transpose_1_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0226508155465126f, .offset= -136}}},
          .rank= 3,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape {1,256,128} -> {1,256,1,128}: re-inserts the dummy H=1 axis so the
 * 1-D pointwise conv can run as a 2-D conv. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d */
  const char* inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = {
    "_text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0226508155465126f, .offset= -136}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose NCHW {1,256,1,128} -> NHWC {1,1,128,256} (perm {0,2,3,1}) for the
 * pointwise Conv2d below. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    "_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0226508155465126f, .offset= -136}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Static pointwise-conv (1x1) weight, dims {1,1,256,1024}: expands 256 -> 1024
 * features. Quantized uint8; payload from the model binary. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight[] = {1, 1, 256, 1024};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0055695087648928f, .offset= -127}}},
                                 .rank= 4,
                                 .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Static pointwise-conv bias tensor, dims {1024}, quantized uint8. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias[] = {1024};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0012661021901295f, .offset= -203}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Conv2d (pointwise, group=1) {1,1,128,256} -> {1,1,128,1024}; stride 1x1,
 * no dilation, no padding; reuse_sparse_indices disabled. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv1_Conv_2d */
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_pwconv1_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 2,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_2d[] = {
    "_text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight",
    "tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 128, 1024};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0440135598182678f, .offset= -190}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_pwconv1_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_pwconv1_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose NHWC {1,1,128,1024} -> NCHW {1,1024,1,128} (perm {0,3,1,2}).
 * NOTE(review): this definition continues past the end of this chunk; the
 * remainder (input-count arguments onward) is unchanged below. */
static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {
    "_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0440135598182678f, .offset= -190}}},
          .rank= 4,
          .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr,
                         .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor
Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate[] = { "_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0440135598182678f, .offset= -190}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__elementwiseneuron_6(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_6 */ Qnn_Param_t params__elementwiseneuron_6[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_6[] = { "_text_encoder_convnext_convnext_1_pwconv1_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_1_act_Mul_1_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__elementwiseneuron_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119207091629505f, .offset= -14}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_6", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_6, // Node Params 1, // Num Node Params inputs__elementwiseneuron_6, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_6, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Reshape (1,1024,128) -> (1,1024,1,128): re-insert a dummy H axis so pwconv2 can run as a 1x1 Conv2d. */ static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = { 
"_text_encoder_convnext_convnext_1_act_Mul_1_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119207091629505f, .offset= -14}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose (perm {0,2,3,1}): NCHW (1,1024,1,128) -> NHWC (1,1,128,1024) feeding the pwconv2 Conv2d. */ static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t 
params__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119207091629505f, .offset= -14}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Static pwconv2 weight tensor (1,1,1024,256), u8 scale/offset quant; bytes come from the accompanying binary blob via BINVARSTART/BINLEN. */ static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight[] = {1, 1, 1024, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0054423529654741f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* Static pwconv2 bias tensor (256 elems), u8 quant, loaded from the binary blob. */ static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006554584251717f, .offset= -149}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* Pointwise Conv2d for pwconv2: stride 1x1, dilation 1x1, zero padding, group=1; inputs are the NHWC activation plus the static weight/bias tensors above. */ static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv2_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_1_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 
0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_2d[] = { "_text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight", "tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0291835200041533f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_pwconv2_Conv_2d", // 
Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_convnext_convnext_1_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose (perm {0,3,1,2}): pwconv2 Conv2d NHWC output (1,1,128,256) -> NCHW (1,256,1,128). */ static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate" }; uint32_t 
dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0291835200041533f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Reshape: drop the dummy H axis, (1,256,1,128) -> (1,256,128). */ static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate[] = { "_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0[] = {1, 256, 128}; 
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0291835200041533f, .offset= -137}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose (perm {0,2,1}): (1,256,128) -> (1,128,256), putting channels last for the elementwise ops that follow. */ static ModelError_t addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { "_text_encoder_convnext_convnext_1_pwconv2_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0291835200041533f, .offset= -137}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn 
Node Type params__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Static ConvNeXt layer-scale gamma (1,1,256), u8 quant with zero offset, loaded from the binary blob. */ static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_1_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_1_gamma[] = {1, 1, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_1_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006572376005352f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_1_gamma), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* ElementWiseBinary with op code 13 (node corresponds to ONNX Mul): gamma * pwconv2 output, i.e. the layer-scale multiply. */ static ModelError_t addNode__text_encoder_convnext_convnext_1_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_Mul_2 */ Qnn_Param_t params__text_encoder_convnext_convnext_1_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_1_Mul_2[] = { "tts_ttl_text_encoder_convnext_convnext_1_gamma", 
"_text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__text_encoder_convnext_convnext_1_Mul_2_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028371787630022f, .offset= -148}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_1_Mul_2, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_Add */ Qnn_Param_t params__text_encoder_convnext_convnext_1_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_1_Add[] = { "_text_encoder_convnext_convnext_1_Mul_output_0", "_text_encoder_convnext_convnext_1_Mul_2_output_0" }; uint32_t 
dimensions__text_encoder_convnext_convnext_1_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040123718790710f, .offset= -161}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_1_Add, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_1_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_1_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_1_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_1_Mul_3 */ Qnn_Param_t params__text_encoder_convnext_convnext_1_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_1_Mul_3[] = { "_text_encoder_convnext_convnext_1_Add_output_0", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_1_Mul_3_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_1_Mul_3[] = { 
/* [review note — GENERATED CODE] Output tensor + addNode for "..._1_Mul_3" (mask multiply;
 * output reuses the producer's quant params: scale 0.0040123718790710, offset -161 — the
 * mask presumably only zeroes elements, so the range is unchanged). Then start of node
 * "..._2_Mul", which applies "text_mask" again at the top of the next ConvNeXt block. */
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_1_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040123718790710f, .offset= -161}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_1_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_1_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_1_Mul_3, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_1_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_1_Mul_3, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_Mul */ Qnn_Param_t params__text_encoder_convnext_convnext_2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_2_Mul[] = { "_text_encoder_convnext_convnext_1_Mul_3_output_0", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_2_Mul_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_Mul_output_0", .type=
/* [review note — GENERATED CODE] Continuation of "..._2_Mul" output tensor + its addNode.
 * Then start of node "..._2_dwconv_Pad": pad_amount is a 3x2 row-major table {0,0, 2,2, 0,0},
 * i.e. 2 elements of padding on each side of the sequence axis only (128 -> 132), feeding the
 * depthwise conv below. */
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040123718790710f, .offset= -161}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_2_Mul, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Pad */ uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__text_encoder_convnext_convnext_2_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2,
/* [review note — GENERATED CODE] Rest of the Pad node: "scheme"=3 selects a Pad scheme from
 * QnnOpDef.h (value meaning not visible here — confirm; for ONNX "Pad" on a masked sequence
 * this is presumably a non-constant/edge-style scheme). Output is 1x132x256 (seq padded by
 * 2+2), same quant params as the input, and the node is registered as "Pad". dataSize=24 is
 * 6 uint32 pad entries * 4 bytes. */
.dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__text_encoder_convnext_convnext_2_dwconv_Pad[] = { "_text_encoder_convnext_convnext_2_Mul_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_output_0[] = {1, 132, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040123718790710f, .offset= -161}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__text_encoder_convnext_convnext_2_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__text_encoder_convnext_convnext_2_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_dwconv_Pad, // Output Tensors
1// Num Output Tensors
),
/* [review note — GENERATED CODE] End of the Pad node, then a layout Transpose inserted by the
 * converter: perm {0,2,1} turns the padded activation from (N, F, C) = 1x132x256 into
 * channel-first (N, C, F) = 1x256x132 ahead of the conv lowering. dataSize=12 is 3 uint32
 * perm entries * 4 bytes. */
err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf */ uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { "_text_encoder_convnext_convnext_2_dwconv_Pad_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 256, 132}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
/* [review note — GENERATED CODE] addNode for the NCF Transpose, then a parameter-less Reshape
 * that lifts the 1-D conv input to a rank-4 2-D conv layout: 1x256x132 -> 1x256x1x132
 * (height-1 image), so the ONNX Conv1d can be lowered onto QNN's DepthWiseConv2d. */
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040123718790710f, .offset= -161}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = {1, 256, 1, 132}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040123718790710f, .offset= -161}}}, .rank= 4,
/* [review note — GENERATED CODE] addNode for the rank-4 Reshape (no params), then the NHWC
 * layout Transpose the converter inserts for spatial-first execution: perm {0,2,3,1} maps
 * NCHW 1x256x1x132 -> NHWC 1x1x132x256 (see perform_axes_to_spatial_first_order=True in the
 * header command line). */
.dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
/* [review note — GENERATED CODE] Remainder of the NHWC Transpose node (perm dataSize=16 is
 * 4 uint32 entries * 4 bytes; output 1x1x132x256), then the start of the next function,
 * which registers the static depthwise-conv weight tensor. */
.data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 132, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040123718790710f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t
/* [review note — GENERATED CODE] Static weight tensor for the depthwise conv: shape
 * 1x5x1x256 (kernel height 1, width 5, 256 channels — matching the 2+2 seq padding above),
 * u8 asymmetric quantization, payload supplied at link time via BINVARSTART/BINLEN from the
 * companion .bin. Then the start of the matching bias tensor function. */
addTensor_tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight[] = {1, 5, 1, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039053845684975f, .offset= -117}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002776405599434f, .offset= -141}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias, .memType=
/* [review note — GENERATED CODE] End of the 256-element u8 bias tensor (payload from the
 * .bin), then the start of the DepthWiseConv2d node: dilation {1,1}, pad {0,0,0,0} (padding
 * was done explicitly by the Pad node above), stride {1,1}. */
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_2_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
/* [review note — GENERATED CODE] pad_amount (2x2, all zero) and stride ({1,1}) tensor params
 * for DepthWiseConv2d, then its three inputs: NHWC activation, static weight, static bias. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_2_dwconv_Conv_2d[] = { "_text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight", "tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias" }; uint32_t
/* [review note — GENERATED CODE] DepthWiseConv2d output: 1x1x128x256 (valid conv over the
 * padded width: 132 - 5 + 1 = 128; new activation quant scale/offset), registered with
 * 3 params + 3 inputs. Then the start of the NCHW Transpose that undoes the spatial-first
 * layout (perm {0,3,1,2}). */
dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021716335322708f, .offset= -149}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__text_encoder_convnext_convnext_2_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__text_encoder_convnext_convnext_2_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
/* [review note — GENERATED CODE] Body of the NHWC->NCHW Transpose: perm {0,3,1,2} maps the
 * conv output 1x1x128x256 back to 1x256x1x128; output keeps the conv's quant params
 * (transpose is data-movement only). */
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021716335322708f, .offset= -149}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
/* [review note — GENERATED CODE] addNode for the NCHW Transpose, then a parameter-less
 * Reshape dropping the height-1 axis: 1x256x1x128 -> 1x256x128, i.e. back to the rank-3
 * conv1d output named "..._dwconv_Conv_output_0". */
"_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate[] = { "_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_output_0[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021716335322708f, .offset= -149}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr,
// Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc */ uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { "_text_encoder_convnext_convnext_2_dwconv_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= 
/* [review note — GENERATED CODE] Output/addNode for the NFC Transpose, then node "..._2_Mul_1":
 * ElementWiseBinary (operation=13, presumably multiply) applying "text_mask" to the conv
 * output. Note its output tensor is named "..._norm_Transpose_output_0" — the converter has
 * folded the ONNX pre-norm Transpose away and reused its tensor name. */
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021716335322708f, .offset= -149}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_Mul_1 */ Qnn_Param_t params__text_encoder_convnext_convnext_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_2_Mul_1[] = { "_text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_2_norm_Transpose_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_Mul_1[] = { (Qnn_Tensor_t) { .version=
/* [review note — GENERATED CODE] Output/addNode for the Mul_1 mask node (1x128x256, quant
 * params inherited from the conv output), then the start of the static LayerNorm gamma
 * tensor (256 elements, u8 with offset 0 — i.e. effectively unsigned-symmetric). */
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021716335322708f, .offset= -149}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_2_Mul_1, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_Mul_1, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030639071483165f, .offset= 0}}}, .rank= 1,
/* [review note — GENERATED CODE] Remainder of the LayerNorm gamma tensor, the full beta
 * (bias) tensor (256 elements, u8, payloads from the .bin), and the start of the LayerNorm
 * node function (its "axes" param begins here). */
.dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039381594397128f, .offset= -178}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_norm_norm_LayerNormalization */ uint32_t dimensions__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes[] = {1};
/* LayerNorm body: normalizes over axis 2 (the 256-wide last dim of the {1,128,256} output),
   epsilon=1e-6; inputs are the activation plus the gamma/beta static tensors registered above. */
uint32_t _text_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization[] = {
/* "axes" tensor param: 1 x uint32 => dataSize 4 bytes. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization[] = {
"_text_encoder_convnext_convnext_2_norm_Transpose_output_0",
"tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight",
"tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0205580443143845f, .offset= -155}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Layout transpose after LayerNorm: perm {0,2,1} turns {1,128,256} into channel-first {1,256,128}
   ("_ncf" = converter's channel-first helper naming; see perform_axes_to_spatial_first_order in header). */
static ModelError_t addNode__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = {
/* "perm" tensor param: 3 x uint32 => dataSize 12 bytes. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = {
"_text_encoder_convnext_convnext_2_norm_Transpose_1_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = {
/* Output keeps the producer's quant encoding (scale=0.02056, offset=-155) — transpose is data-invariant. */
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0205580443143845f, .offset= -155}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape {1,256,128} -> {1,256,1,128}: inserts a unit height axis so the following
   pointwise (1x1) conv can run as a Conv2d. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d */
const char* inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = {
"_text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0205580443143845f, .offset= -155}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
static ModelError_t
/* NCHW -> NHWC transpose for pwconv1's input: perm {0,2,3,1}, {1,256,1,128} -> {1,1,128,256}. */
addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {
/* "perm" tensor param: 4 x uint32 => dataSize 16 bytes. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {
"_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0205580443143845f, .offset= -155}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Registers pwconv1's weight: {1,1,256,1024} — 1x1 kernel, 256 in-channels, 1024 out-channels
   (presumably HWIO layout per QNN Conv2d convention — confirm against QnnOpDef.h). u8, scale=0.0059638, offset=-130. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight[] = {1, 1, 256, 1024};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0059638386592269f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Registers pwconv1's bias: shape {1024}, u8 quantized (scale=0.0014679, offset=-209). */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014679000014439f, .offset= -209}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* pwconv1 as Conv2d: 1x1 conv realizing the ConvNeXt pointwise expansion 256 -> 1024
   (node definition continues in the next chunk). */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv1_Conv_2d */
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t
/* pwconv1 Conv2d params: dilation {1,1}, pad {0,0,0,0}, stride {1,1}, group=1 — a plain 1x1 conv. */
_text_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride[] = {2};
uint32_t _text_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_2_pwconv1_Conv_2d[] = {
/* "dilation": 2 x uint32 => dataSize 8. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
/* "pad_amount": 2x2 matrix of uint32 => dataSize 16. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
/* "stride": 2 x uint32 => dataSize 8. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_2d[] = {
"_text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc",
"tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight",
"tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_2d[] = {
/* NHWC output {1,1,128,1024}; u8 scale=0.0432294, offset=-191. */
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0432293899357319f, .offset= -191}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_convnext_convnext_2_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* NHWC -> NCHW transpose of the conv result: perm {0,3,1,2}, {1,1,128,1024} -> {1,1024,1,128}. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = {
"_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0432293899357319f, .offset= -191}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
static ModelError_t
/* Reshape {1,1024,1,128} -> {1,1024,128}: drops the unit height axis inserted for the 1x1 conv,
   yielding pwconv1's final output tensor. */
addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate */
const char* inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate[] = {
"_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_output_0[] = {1, 1024, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0432293899357319f, .offset= -191}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Activation applied to pwconv1's output (ONNX name suggests the ConvNeXt act.Mul — i.e. GELU-like):
   ElementWiseNeuron with operation id 1. NOTE(review): the meaning of operation=1 is defined by the
   QNN op enum in QnnOpDef.h — confirm there rather than assuming a specific activation. */
static ModelError_t addNode__elementwiseneuron_8(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_8 */
Qnn_Param_t params__elementwiseneuron_8[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
const char* inputs__elementwiseneuron_8[] = {
"_text_encoder_convnext_convnext_2_pwconv1_Conv_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_act_Mul_1_output_0[] = {1, 1024, 128};
Qnn_Tensor_t outputs__elementwiseneuron_8[] = {
/* Post-activation range re-quantized: scale=0.0115627, offset=-15. */
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0115626668557525f, .offset= -15}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_8", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_8, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_8, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_8, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape {1,1024,128} -> {1,1024,1,128}: same unit-height trick for pwconv2 (1x1 conv, 1024 -> 256). */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d */
const char* inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = {
"_text_encoder_convnext_convnext_2_act_Mul_1_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 128};
Qnn_Tensor_t
/* Output tensor of the pwconv2 input reshape: {1,1024,1,128}, quant encoding carried over
   from the activation (scale=0.0115627, offset=-15). */
outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0115626668557525f, .offset= -15}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* NCHW -> NHWC transpose for pwconv2's input: perm {0,2,3,1}, {1,1024,1,128} -> {1,1,128,1024}. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {
/* "perm" tensor param: 4 x uint32 => dataSize 16 bytes. */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {
"_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0115626668557525f, .offset= -15}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Registers pwconv2's weight: {1,1,1024,256} — 1x1 kernel, 1024 in-channels, 256 out-channels
   (the ConvNeXt pointwise projection back down). u8, scale=0.0070866, offset=-119. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight[] = {1, 1, 1024, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0070865601301193f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Registers pwconv2's bias: shape {256} (definition continues below). */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006668278365396f, .offset= -128}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv2_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_2_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    /* 2x2 pad table, all zeros: no padding performed by the conv itself */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  /* Input order required by QNN Conv2d: activation, weight, bias. */
  const char* inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_2d[] = {
    "_text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight",
    "tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0211853757500648f, .offset= -145}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_2_pwconv2_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_convnext_convnext_2_pwconv2_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose perm {0,3,1,2}: NHWC conv output {1,1,128,256} -> NCHW
 * {1,256,1,128}, undoing the layout change made for the 2D conv. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = {
    "_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 256,
1, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        /* Layout-only op: quant encoding is carried over unchanged from the conv output. */
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0211853757500648f, .offset= -145}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape {1,256,1,128} -> {1,256,128}: drops the dummy spatial axis that was
 * introduced to express the original 1-D conv as a QNN Conv2d. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate */
  const char* inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate[] = {
    "_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0211853757500648f, .offset= -145}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose perm {0,2,1}: {1,256,128} (channels-first) -> {1,128,256}
 * (feature-last / "nfc"), the layout the following elementwise ops consume. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc */
  uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = {
    "_text_encoder_convnext_convnext_2_pwconv2_Conv_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0211853757500648f, .offset= -145}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
inputs__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Registers the frozen per-channel scale ("gamma") tensor of convnext block 2,
 * shape {1,1,256}, 8-bit quantization (scale 0.0010785099584609, offset 0),
 * backed by BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_2_gamma(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_2_gamma[] = {1, 1, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_2_gamma", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_text_encoder_convnext_convnext_2_gamma",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0010785099584609f, .offset= 0}}},
                               .rank= 3,
                               .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_2_gamma,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_2_gamma),
                                              .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_2_gamma)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

/* ElementWiseBinary gamma * conv_output (broadcast {1,1,256} over {1,128,256}).
 * operation=13 is the code this converter emits for ONNX Mul nodes — confirm
 * against QNN_OP_ELEMENT_WISE_BINARY_OPERATION_* in QnnOpDef.h. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_2_Mul_2 */
  Qnn_Param_t params__text_encoder_convnext_convnext_2_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_2_Mul_2[] = {
    "tts_ttl_text_encoder_convnext_convnext_2_gamma",
    "_text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_2_Mul_2_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_Mul_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_2_Mul_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0038384350482374f, .offset= -154}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_2_Mul_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_2_Mul_2", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_convnext_convnext_2_Mul_2, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_2_Mul_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_2_Mul_2, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ElementWiseBinary residual add: Mul_output_0 + Mul_2_output_0, {1,128,256}.
 * operation=0 is the code this converter emits for ONNX Add nodes — confirm
 * against QNN_OP_ELEMENT_WISE_BINARY_OPERATION_* in QnnOpDef.h. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_2_Add */
  Qnn_Param_t params__text_encoder_convnext_convnext_2_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_2_Add[] = {
    "_text_encoder_convnext_convnext_2_Mul_output_0",
    "_text_encoder_convnext_convnext_2_Mul_2_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_2_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_Add[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_2_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0045880447141826f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_2_Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_2_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_convnext_convnext_2_Add, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_2_Add, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_2_Add, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ElementWiseBinary (operation=13, Mul): masks block-2 output with the
 * graph input "text_mask"; quant encoding matches the Add output exactly. */
static ModelError_t addNode__text_encoder_convnext_convnext_2_Mul_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_2_Mul_3 */
  Qnn_Param_t params__text_encoder_convnext_convnext_2_Mul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_2_Mul_3[] = {
    "_text_encoder_convnext_convnext_2_Add_output_0",
    "text_mask"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_2_Mul_3_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_2_Mul_3[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name=
"_text_encoder_convnext_convnext_2_Mul_3_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        /* Masking keeps the producer's encoding (same scale/offset as Add_output_0). */
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0045880447141826f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_2_Mul_3_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_2_Mul_3", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_convnext_convnext_2_Mul_3, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_2_Mul_3, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_2_Mul_3, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ElementWiseBinary (operation=13, Mul): entry of convnext block 3 — applies
 * "text_mask" again to the block-2 result. Output {1,128,256}, encoding
 * unchanged from its input. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_3_Mul */
  Qnn_Param_t params__text_encoder_convnext_convnext_3_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_3_Mul[] = {
    "_text_encoder_convnext_convnext_2_Mul_3_output_0",
    "text_mask"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_3_Mul_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_Mul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_3_Mul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0045880447141826f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_3_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_3_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_convnext_convnext_3_Mul, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_3_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_3_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Pad node for block 3's depthwise conv: pad table is rank {3,2} =
 * {{0,0},{2,2},{0,0}}, i.e. 2 elements on each side of the sequence axis
 * ({1,128,256} -> {1,132,256}). scalar scheme=3 selects the pad scheme —
 * confirm its meaning against QNN_OP_PAD_SCHEME_* in QnnOpDef.h. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_dwconv_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Pad */
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_pad_amount[] = {3, 2};
  uint32_t _text_encoder_convnext_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
  Qnn_Param_t params__text_encoder_convnext_convnext_3_dwconv_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_3_dwconv_Pad_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Pad_pad_amount, .dataSize=24}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_3_dwconv_Pad[] = {
    "_text_encoder_convnext_convnext_3_Mul_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_output_0[] = {1, 132, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_dwconv_Pad[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_3_dwconv_Pad_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0045880447141826f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_3_dwconv_Pad", // Node Name
                         "qti.aisw", // Package Name
                         "Pad", // Qnn Node Type
                         params__text_encoder_convnext_convnext_3_dwconv_Pad, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_convnext_convnext_3_dwconv_Pad, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_3_dwconv_Pad, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose perm {0,2,1}: padded sequence {1,132,256} -> channels-first
 * {1,256,132} ahead of the block-3 depthwise conv. */
static ModelError_t
addNode__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf */
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {
    "_text_encoder_convnext_convnext_3_dwconv_Pad_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 256, 132};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0045880447141826f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape {1,256,132} -> {1,256,1,132}: inserts a dummy spatial axis so the
 * 1-D depthwise conv can run as a QNN Conv2d. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d */
  const char* inputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = {
    "_text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = {1, 256, 1, 132};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0045880447141826f, .offset= -142}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose perm {0,2,3,1}: NCHW {1,256,1,132} -> NHWC {1,1,132,256}, the
 * layout QNN Conv2d expects for its activation input. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
    "_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 132, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0045880447141826f, .offset= -142}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Registers the frozen depthwise-conv weight tensor of convnext block 3,
 * shape {1, 5, 1, 256} (5-tap filter per channel), 8-bit asymmetric quant
 * (scale 0.0042436453513801, offset -151), backed by BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight[] = {1, 5, 1, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0042436453513801f, .offset= -151}}},
                               .rank= 4,
                               .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight),
                                              .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the frozen depthwise-conv bias tensor of convnext block 3,
 * shape {256}, 8-bit asymmetric quant (scale 0.0003902013995685, offset -130),
 * backed by BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0003902013995685f, .offset= -130}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias),
                                              .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds the Conv2d node for block 3's dwconv (depthwise, judging by the
 * original ONNX name and the {1,5,1,256} weight — the group param is declared
 * past the end of this chunk; confirm there). dilation {1,1}, pad {0,0,0,0}
 * (input is pre-padded by the Pad node above), stride {1,1}.
 * NOTE: this function continues beyond the end of this source chunk. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Conv_2d */
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_convnext_convnext_3_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_3_dwconv_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_3_dwconv_Conv_2d[] = {
    "_text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight",
    "tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t
outputs__text_encoder_convnext_convnext_3_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017871345626190f, .offset= -146}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__text_encoder_convnext_convnext_3_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__text_encoder_convnext_convnext_3_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017871345626190f, .offset= -146}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw", // Node Name 
"qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate[] = { "_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_output_0[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017871345626190f, .offset= -146}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc */ uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = { "_text_encoder_convnext_convnext_3_dwconv_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, 
.name= "_text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017871345626190f, .offset= -146}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_3_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_Mul_1 */ Qnn_Param_t params__text_encoder_convnext_convnext_3_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_3_Mul_1[] = { "_text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_3_norm_Transpose_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_convnext_convnext_3_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017871345626190f, .offset= -146}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_3_Mul_1, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_3_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034614999312907f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036931217182428f, .offset= -124}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_norm_norm_LayerNormalization */ uint32_t dimensions__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _text_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes[] = {2}; 
Qnn_Param_t params__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization[] = { "_text_encoder_convnext_convnext_3_norm_Transpose_output_0", "tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight", "tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0169066227972507f, .offset= -140}}}, .rank= 3, 
.dimensions=dimensions__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = { "_text_encoder_convnext_convnext_3_norm_Transpose_1_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0169066227972507f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0169066227972507f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0169066227972507f, 
.offset= -140}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight[] = {1, 1, 256, 1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074272099882364f, .offset= -140}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight)}}, .isDynamicDimensions= 
/* NOTE(review): machine-generated QNN model-builder code (qnn-onnx-converter, see file header). Do not hand-edit values: quant scales/offsets, ranks and dims are emitted from the quantized ONNX graph. Comments below are review annotations only. */
/* Tail of addTensor_text_ids (function opens before this chunk): closes the Qnn_Tensor_t v2 initializer for the "text_ids" APP_WRITE input and returns. */
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Registers the static pwconv1 bias tensor: rank-1 {1024}, uint8 scale/offset quantized (scale 0.0018637688, offset -216). Payload bytes come from the linked model binary via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018637687899172f, .offset= -216}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Adds the pwconv1 Conv2d node: 1x1 stride/dilation, zero pad, group=1 (a pointwise conv lowered to 2-D). Inputs: NHWC activation "_..._Conv_reshape_to_2d_nhwc" + static weight/bias. Output: NHWC {1,1,128,1024}, uint8 (scale 0.0348753, offset -198). */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv1_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_3_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_2d[] = { "_text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight", "tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0348752960562706f, .offset= -198}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_convnext_convnext_3_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose node: permutes the pwconv1 conv output NHWC {1,1,128,1024} -> NCHW {1,1024,1,128} with perm {0,3,1,2}. Quant params are carried through unchanged. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0348752960562706f, .offset= -198}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape node: drops the singleton spatial axis, {1,1024,1,128} -> {1,1024,128}, producing "_..._pwconv1_Conv_output_0" (the ONNX 1-D conv result). */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate[] = { "_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0348752960562706f, .offset= -198}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* ElementWiseNeuron node on the pwconv1 output ({1,1024,128}), operation id 1 -- presumably the converter's lowering of the ConvNeXt activation (the output feeds "_..._act_Mul_1_output_0"); see QnnOpDef.h for the enum meaning -- TODO confirm. New quant range: scale 0.0083168, offset -20. */
static ModelError_t addNode__elementwiseneuron_10(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_10 */ Qnn_Param_t params__elementwiseneuron_10[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_10[] = { "_text_encoder_convnext_convnext_3_pwconv1_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_3_act_Mul_1_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__elementwiseneuron_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083167636767030f, .offset= -20}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_10", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_10, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_10, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_10, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape node: re-inserts a singleton spatial axis, {1,1024,128} -> {1,1024,1,128}, so pwconv2 can run as a 2-D conv. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_3_act_Mul_1_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083167636767030f, .offset= -20}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose node: NCHW {1,1024,1,128} -> NHWC {1,1,128,1024} with perm {0,2,3,1}, matching the layout pwconv2's Conv2d expects. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083167636767030f, .offset= -20}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers the static pwconv2 weight tensor: {1,1,1024,256}, uint8 (scale 0.0075846, offset -127); bytes from the model binary. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight[] = {1, 1, 1024, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0075845625251532f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Registers the static pwconv2 bias tensor: {256}, uint8 (scale 0.0016876, offset -86); bytes from the model binary. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016875998117030f, .offset= -86}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Adds the pwconv2 Conv2d node (projection 1024 -> 256 channels): 1x1 stride/dilation, zero pad, group=1. Output NHWC {1,1,128,256}, uint8 (scale 0.0144073, offset -128). */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv2_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_3_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_2d[] = { "_text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight", "tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144073059782386f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_convnext_convnext_3_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose node: pwconv2 output NHWC {1,1,128,256} -> NCHW {1,256,1,128} with perm {0,3,1,2}. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144073059782386f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape node: drops the singleton axis, {1,256,1,128} -> {1,256,128}, producing "_..._pwconv2_Conv_output_0". */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate[] = { "_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144073059782386f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose node: swaps channel/sequence axes with perm {0,2,1}, {1,256,128} -> {1,128,256} ("nfc" = batch, frame, channel layout for the following elementwise ops). */
static ModelError_t addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = { "_text_encoder_convnext_convnext_3_pwconv2_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144073059782386f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers the static gamma (layer-scale) tensor for convnext_3: {1,1,256}, uint8 (scale 0.0018376, offset 0); bytes from the model binary. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_3_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_3_gamma[] = {1, 1, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_3_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_3_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018376489169896f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_3_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_3_gamma), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_3_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* ElementWiseBinary node, operation=13: combines gamma {1,1,256} with the pwconv2 output {1,128,256} (broadcast over the frame axis). Op id 13 is presumably multiply (node derives from ONNX Mul) -- see QnnOpDef.h to confirm. Output scale 0.0042619, offset -127. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_Mul_2 */ Qnn_Param_t params__text_encoder_convnext_convnext_3_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_3_Mul_2[] = { "tts_ttl_text_encoder_convnext_convnext_3_gamma", "_text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__text_encoder_convnext_convnext_3_Mul_2_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042619039304554f, .offset= -127}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_3_Mul_2, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_3_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_Mul_2, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* ElementWiseBinary node, operation=0 (presumably add -- node derives from ONNX Add): the block's residual connection, "_..._Mul_output_0" + "_..._Mul_2_output_0". NOTE(review): the first input is produced by a node defined elsewhere in this file. Output scale 0.0055577, offset -130. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_Add */ Qnn_Param_t params__text_encoder_convnext_convnext_3_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_3_Add[] = { "_text_encoder_convnext_convnext_3_Mul_output_0", "_text_encoder_convnext_convnext_3_Mul_2_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_3_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055576865561306f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_3_Add, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_3_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* ElementWiseBinary node, operation=13 (presumably multiply): masks the residual output with the graph input "text_mask" {1,1,128} broadcast. Output reuses the Add output's quant params (scale 0.0055577, offset -130) -- masking does not widen the range. */
static ModelError_t addNode__text_encoder_convnext_convnext_3_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_3_Mul_3 */ Qnn_Param_t params__text_encoder_convnext_convnext_3_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_3_Mul_3[] = { "_text_encoder_convnext_convnext_3_Add_output_0", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_3_Mul_3_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_3_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_3_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055576865561306f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_3_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_3_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_3_Mul_3, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_3_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_3_Mul_3, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Start of the convnext_4 block: ElementWiseBinary operation=13 re-masks the convnext_3 result with "text_mask". Definition continues past this chunk (output initializer is completed on the following lines). */
static ModelError_t addNode__text_encoder_convnext_convnext_4_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_Mul */ Qnn_Param_t params__text_encoder_convnext_convnext_4_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_4_Mul[] = { "_text_encoder_convnext_convnext_3_Mul_3_output_0", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_4_Mul_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055576865561306f, .offset= -130}}}, .rank= 3,
.dimensions=dimensions__text_encoder_convnext_convnext_4_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_4_Mul, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Pad node for block-4 depthwise conv: pad_amount is a {3,2} static u32 tensor
   {{0,0},{2,2},{0,0}} — 2 elements each side of the sequence axis (128 -> 132), scheme=3.
   Output keeps the input's quant params (scale=0.0055576865561306, offset=-130). */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Pad */
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__text_encoder_convnext_convnext_4_dwconv_Pad[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_dwconv_Pad[] = {
"_text_encoder_convnext_convnext_4_Mul_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_output_0[] = {1, 132, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Pad[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055576865561306f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__text_encoder_convnext_convnext_4_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__text_encoder_convnext_convnext_4_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose to channel-first ("ncf"): perm {0,2,1} turns {1,132,256} into {1,256,132}
   ahead of the conv layout pipeline. Quant params are carried through unchanged. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf */
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {
"_text_encoder_convnext_convnext_4_dwconv_Pad_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {1, 256, 132};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055576865561306f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr,
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshape (no params): lifts the rank-3 {1,256,132} tensor to rank-4 {1,256,1,132} so the 1-D
   depthwise conv can run as a 2-D conv with height 1. (Completed on the next chunk line.) */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d */
const char* inputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d[] = {
"_text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d[] = {1, 256, 1, 132};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055576865561306f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose to NHWC for the conv: perm {0,2,3,1} turns {1,256,1,132} into {1,1,132,256}
   (QNN convs consume channel-last activations per the converter's layout pass). */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char*
inputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {
"_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 132, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055576865561306f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers the block-4 depthwise conv kernel: static u8 tensor {1,5,1,256}
   (5-wide kernel, 256 channels; scale=0.0036128840874881, offset=-143), blob-backed. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight[] = {1, 5, 1, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight", // Tensor
Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036128840874881f, .offset= -143}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Registers the block-4 depthwise conv bias: static u8 tensor {256}
   (8-bit quantized bias per bias_bitwidth=8; scale=0.0006931324605830, offset=-136), blob-backed. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006931324605830f, .offset= -136}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* DepthWiseConv2d node (begins here; finished on the next chunk line): stride {1,1},
   dilation {1,1}, pad_amount all zero (padding was applied explicitly by the Pad node above). */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Conv_2d */
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_2d_stride[] = {2};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_dwconv_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_dwconv_Conv_2d[] = {
"_text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc",
"tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight",
"tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Conv_2d[] = {
/* NHWC output {1,1,128,256}: width 132 - (kernel 5 - 1) = 128 with zero pad, stride 1.
   Conv output quant: scale=0.0018011690117419, offset=-145. */
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate", .type=
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018011690117419f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__text_encoder_convnext_convnext_4_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__text_encoder_convnext_convnext_4_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose back to channel-first: perm {0,3,1,2} turns {1,1,128,256} into {1,256,1,128};
   undoes the NHWC layout used for the conv. Quant params unchanged. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = {
"_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018011690117419f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshape (no params): drops the height-1 axis, {1,256,1,128} -> {1,256,128}, producing
   the 1-D conv output "_text_encoder_convnext_convnext_4_dwconv_Conv_output_0". */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Conv_intermediate */
const char* inputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate[] = {
"_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_output_0[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018011690117419f, .offset= -145}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate, // Output Tensors
1//
Num Output Tensors
), err);
return err;
}

/* Transpose back to feature-last ("nfc"): perm {0,2,1}, {1,256,128} -> {1,128,256}.
   (Output tensor completed on the next chunk line.) */
static ModelError_t addNode__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc */
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = {
"_text_encoder_convnext_convnext_4_dwconv_Conv_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018011690117419f, .offset= -145}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_4_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_Mul_1 */ Qnn_Param_t params__text_encoder_convnext_convnext_4_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_4_Mul_1[] = { "_text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_4_norm_Transpose_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018011690117419f, .offset= -145}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_4_Mul_1, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_4_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034025176428258f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight)}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Registers the LayerNorm beta (bias) vector: STATIC u8 tensor {256}, scale 0.0028706125449389 /
   offset -129, payload from the model binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028706125449389f, .offset= -129}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Adds the LayerNorm node: the "axes" tensor holds the single value {2}, i.e. normalization runs over the
   last (feature) dim of the {1, 128, 256} input; epsilon = 1e-6. Inputs are the masked activation plus the
   gamma/bias tensors registered above; output keeps the same shape with a new u8 encoding. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_norm_norm_LayerNormalization */
uint32_t dimensions__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _text_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2=
{ .id=0, .name= "_text_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization[] = {
"_text_encoder_convnext_convnext_4_norm_Transpose_output_0",
"tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight",
"tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0157209578901529f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose back to channel-major (NCF): perm {0, 2, 1} turns the {1, 128, 256} LayerNorm output into
   {1, 256, 128} ahead of the pwconv1 pointwise convolution. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = {
"_text_encoder_convnext_convnext_4_norm_Transpose_1_output_0"
};
/* Output keeps the LayerNorm encoding (scale 0.0157209578901529 / offset -133); transpose only reorders data. */
uint32_t dimensions__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0157209578901529f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Inserts a singleton spatial axis: reshape {1, 256, 128} -> {1, 256, 1, 128} so the 1x1 pointwise conv
   (pwconv1) can be executed as a Conv2d. Reshape takes no params (nullptr / 0 below). */
static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d */
const char*
inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d[] = {
"_text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0157209578901529f, .offset= -133}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Layout change NCHW -> NHWC via Transpose (perm {0, 2, 3, 1}), giving the {1, 1, 128, 256} layout the
   Conv2d op consumes. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0,
2, 3, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = {
"_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0157209578901529f, .offset= -133}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* pwconv1 weights: STATIC u8 tensor {1, 1, 256, 1024} (1x1 kernel, 256 in -> 1024 out channels),
   payload from the model binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight[] = {1, 1, 256, 1024};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074375984258950f, .offset= -144}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* pwconv1 bias: STATIC u8 tensor {1024} from the binary blob (function continues on the next chunk line). */
static ModelError_t
addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024450970813632f, .offset= -196}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* pwconv1 as Conv2d: 1x1 kernel with stride {1, 1}, dilation {1, 1}, zero pad_amount and group=1 --
   i.e. a plain pointwise projection from 256 to 1024 channels on the {1, 1, 128, 256} NHWC input. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv1_Conv_2d */
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride[] = {2};
uint32_t _text_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_pwconv1_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_2d[] = {
"_text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc",
"tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight",
"tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate[] = {1, 1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0393117032945156f, .offset= -177}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_convnext_convnext_4_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose the conv result back NHWC -> NCHW (perm {0, 3, 1, 2}): {1, 1, 128, 1024} -> {1, 1024, 1, 128}. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char*
inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = {
"_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0393117032945156f, .offset= -177}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Drops the singleton spatial axis inserted for Conv2d: reshape {1, 1024, 1, 128} -> {1, 1024, 128}. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate */
const char* inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate[] = {
"_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_output_0[] = {1, 1024, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0393117032945156f, .offset= -177}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseNeuron activation on the pwconv1 output (ONNX node was the ConvNeXt act's Mul_1).
   operation=1 -- presumably GELU; confirm the ElementWiseNeuron operation enum value in QnnOpDef.h. */
static ModelError_t addNode__elementwiseneuron_12(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_12 */
Qnn_Param_t params__elementwiseneuron_12[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
const char* inputs__elementwiseneuron_12[] = {
"_text_encoder_convnext_convnext_4_pwconv1_Conv_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_act_Mul_1_output_0[] = {1,
1024, 128};
Qnn_Tensor_t outputs__elementwiseneuron_12[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126411048695445f, .offset= -13}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_12", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_12, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_12, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_12, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Mirror of the pwconv1 prep for pwconv2: reshape {1, 1024, 128} -> {1, 1024, 1, 128} so the 1x1
   pointwise conv can run as Conv2d. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d */
const char* inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d[] = {
"_text_encoder_convnext_convnext_4_act_Mul_1_output_0"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126411048695445f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* NCHW -> NHWC transpose (perm {0, 2, 3, 1}) giving the {1, 1, 128, 1024} layout Conv2d expects. */
static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f,
.offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {
"_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126411048695445f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* pwconv2 weights: STATIC u8 tensor {1, 1, 1024, 256} (1x1 kernel projecting 1024 -> 256 channels),
   payload from the model binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight[] = {1, 1, 1024, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0081587349995971f, .offset= -132}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* pwconv2 bias: STATIC u8 tensor {256} from the binary blob (function continues on the next chunk line). */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0025275575462729f, .offset= -103}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv2_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_4_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, 
.name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_2d[] = { "_text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight", "tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0259697176516056f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_convnext_convnext_4_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0259697176516056f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate */ const char* inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate[] = { "_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0259697176516056f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = { "_text_encoder_convnext_convnext_4_pwconv2_Conv_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0259697176516056f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_4_gamma(QnnModel& model){ 
ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_4_gamma[] = {1, 1, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_4_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_4_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023630592040718f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_4_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_4_gamma), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_4_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_4_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_Mul_2 */ Qnn_Param_t params__text_encoder_convnext_convnext_4_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_4_Mul_2[] = { "tts_ttl_text_encoder_convnext_convnext_4_gamma", "_text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__text_encoder_convnext_convnext_4_Mul_2_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0112641649320722f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_4_Mul_2, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_4_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_4_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_Add */ Qnn_Param_t params__text_encoder_convnext_convnext_4_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_4_Add[] = { "_text_encoder_convnext_convnext_4_Mul_output_0", "_text_encoder_convnext_convnext_4_Mul_2_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_4_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= 
{.scale= 0.0121892392635345f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_4_Add, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_4_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_4_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_4_Mul_3 */ Qnn_Param_t params__text_encoder_convnext_convnext_4_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_4_Mul_3[] = { "_text_encoder_convnext_convnext_4_Add_output_0", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_4_Mul_3_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_4_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_4_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120038967579603f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_4_Mul_3_output_0, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_4_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_4_Mul_3, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_4_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_4_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_5_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_5_Mul */ Qnn_Param_t params__text_encoder_convnext_convnext_5_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_convnext_convnext_5_Mul[] = { "_text_encoder_convnext_convnext_4_Mul_3_output_0", "text_mask" }; uint32_t dimensions__text_encoder_convnext_convnext_5_Mul_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120038967579603f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_5_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_5_Mul, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_5_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_5_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Pad */ uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _text_encoder_convnext_convnext_5_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__text_encoder_convnext_convnext_5_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value 
= 3}}}} }; const char* inputs__text_encoder_convnext_convnext_5_dwconv_Pad[] = { "_text_encoder_convnext_convnext_5_Mul_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_output_0[] = {1, 132, 256}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120038967579603f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_5_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_convnext_convnext_5_dwconv_Pad, // Node Params 2, // Num Node Params inputs__text_encoder_convnext_convnext_5_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_5_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf */ uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = { "_text_encoder_convnext_convnext_5_dwconv_Pad_output_0" }; uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = {1, 256, 132}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120038967579603f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d */ const char* inputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d[] = {1, 256, 1, 132}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120038967579603f, .offset= -120}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node 
Params 0, // Num Node Params inputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 132, 256}; Qnn_Tensor_t 
/*
 * NOTE(review): machine-generated QNN graph-builder code (qnn-onnx-converter;
 * see the file header for the exact converter command line).  This section
 * builds ConvNeXt block #5 of the text encoder:
 *
 *   dwconv  : DepthWiseConv2d (1x5 kernel over the 128-token axis, 256 ch),
 *             bracketed by Reshape/Transpose nodes because QNN convs are 4D NHWC
 *   mask    : ElementWiseBinary multiply with "text_mask"
 *   norm    : LayerNorm over the channel axis
 *   pwconv1 : 1x1 Conv2d expanding 256 -> 1024 channels
 *
 * All scale/offset values come from min-max calibration and all dims from the
 * ONNX graph - do not hand-edit them.  Only comments/whitespace were changed
 * in this review pass; every code token is identical to the generated output.
 */
/* Tail of addNode__.._dwconv_Conv_reshape_to_2d_nhwc (function starts in a
 * previous chunk): output tensor of the NCHW->NHWC Transpose feeding the
 * depthwise conv, then the Transpose node registration. */
outputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0120038967579603f, .offset= -120}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1 // Num Output Tensors
), err);
return err;
}

/* Static uint8 depthwise-conv filter, dims {1, 5, 1, 256} - presumably
 * [H, W, depth_in, channel] per the QNN DepthWiseConv2d weight layout (TODO
 * confirm against QnnOpDef).  Bytes come from the side-loaded binary blob via
 * BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight[] = {1, 5, 1, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0029441085644066f, .offset= -119}}},
        .rank= 4,
        .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight),
                       .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Static uint8 depthwise-conv bias, one value per output channel (256). */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0013105344260111f, .offset= -155}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias),
                       .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* DepthWiseConv2d node: unit dilation/stride, zero padding (each tensor param
 * holds 2 or 4 uint32s - dataSize 8/16 bytes matches).  Input is the NHWC
 * 4D view {1,1,128,256}; output keeps that shape (1x5 kernel, "same"-style
 * width handled upstream - padding here is 0, so the converter presumably
 * padded elsewhere; values are taken verbatim from the ONNX graph). */
static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Conv_2d */
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_convnext_convnext_5_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_5_dwconv_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_5_dwconv_Conv_2d[] = {
    "_text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight",
    "tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_dwconv_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0035509162116796f, .offset= -112}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_dwconv_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "DepthWiseConv2d", // Qnn Node Type
                         params__text_encoder_convnext_convnext_5_dwconv_Conv_2d, // Node Params
                         3, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_dwconv_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_dwconv_Conv_2d, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Transpose NHWC {1,1,128,256} -> NCHW {1,256,1,128} (perm 0,3,1,2).  Layout
 * only: quantization params are carried through unchanged. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {
    "_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0035509162116796f, .offset= -112}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Reshape {1,256,1,128} -> {1,256,128}: drops the dummy H axis, restoring the
 * original 3D NCF layout of the ONNX Conv output. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Conv_intermediate */
  const char* inputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate[] = {
    "_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0035509162116796f, .offset= -112}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_dwconv_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Transpose {1,256,128} -> {1,128,256} (perm 0,2,1): channels-last view for
 * the mask multiply and LayerNorm that follow. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc */
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = {
    "_text_encoder_convnext_convnext_5_dwconv_Conv_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0035509162116796f, .offset= -112}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* ElementWiseBinary of conv output with the "text_mask" graph input.  The node
 * came from an ONNX Mul, so operation=13 is presumably the MULTIPLY opcode -
 * verify against QnnOpDef.h if this is ever edited. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_Mul_1 */
  Qnn_Param_t params__text_encoder_convnext_convnext_5_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_5_Mul_1[] = {
    "_text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc",
    "text_mask"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_norm_Transpose_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_Mul_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_norm_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0035509162116796f, .offset= -112}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_norm_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_Mul_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_convnext_convnext_5_Mul_1, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_Mul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_Mul_1, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Static LayerNorm gamma (per-channel scale, 256 values) from the binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0033042507711798f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight),
                       .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Static LayerNorm beta (per-channel bias, 256 values) from the binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0020134672522545f, .offset= -127}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias),
                       .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* LayerNorm over axis 2 (the 256-channel axis of {1,128,256}), epsilon 1e-6,
 * with the gamma/beta tensors registered above. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_norm_norm_LayerNormalization */
  uint32_t dimensions__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _text_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes, .dataSize=4}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization[] = {
    "_text_encoder_convnext_convnext_5_norm_Transpose_output_0",
    "tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight",
    "tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_norm_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0190251395106316f, .offset= -124}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_norm_norm_LayerNormalization", // Node Name
                         "qti.aisw", // Package Name
                         "LayerNorm", // Qnn Node Type
                         params__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Transpose {1,128,256} -> {1,256,128} (perm 0,2,1): back to channels-first
 * ahead of the pointwise conv. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = {
    "_text_encoder_convnext_convnext_5_norm_Transpose_1_output_0"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0190251395106316f, .offset= -124}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Reshape {1,256,128} -> {1,256,1,128}: re-adds the dummy H axis so pwconv1
 * can run as a 4D Conv2d. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d */
  const char* inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d[] = {
    "_text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0190251395106316f, .offset= -124}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Transpose NCHW {1,256,1,128} -> NHWC {1,1,128,256} (perm 0,2,3,1) feeding
 * the pointwise conv. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    "_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0190251395106316f, .offset= -124}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

/* Static uint8 pointwise-conv weights, dims {1, 1, 256, 1024}: 1x1 kernel
 * expanding 256 -> 1024 channels, bytes from the binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight[] = {1, 1, 256, 1024};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0114952437579632f, .offset= -152}}},
        .rank= 4,
        .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight),
                       .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Static uint8 pointwise-conv bias, one value per output channel (1024). */
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias[] = {1024};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0026017257478088f, .offset= -214}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias),
                       .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Head of addNode__.._pwconv1_Conv_2d (1x1 Conv2d, 256 -> 1024 ch; function
 * continues in the next chunk).  Kept verbatim, preserving the original
 * chunk-line boundaries, so the join with the following chunk is unchanged. */
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv1_Conv_2d */ uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride[] = {2}; uint32_t _text_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_convnext_convnext_5_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= 
{.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride, .dataSize=8}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_2d[] = { "_text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight", "tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias" }; uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665782690048218f, .offset= -169}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_convnext_convnext_5_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_convnext_convnext_5_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor 
Names outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate" }; uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665782690048218f, .offset= -169}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape squeezing the dummy height axis inserted for the 2d conv:
// {1, 1024, 1, 128} -> {1, 1024, 128}. Quantization params are carried over
// unchanged (same scale/offset as the input tensor).
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate */
const char* inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate[] = { "_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_output_0[] = {1, 1024, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665782690048218f, .offset= -169}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseNeuron activation on the pwconv1 output (scalar operation code 1;
// see the ElementWiseNeuron operation enum in QnnOpDef.h — presumably the
// ConvNeXt block's GELU, TODO confirm against the enum).
static ModelError_t addNode__elementwiseneuron_14(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_14 */
Qnn_Param_t params__elementwiseneuron_14[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_14[] = { "_text_encoder_convnext_convnext_5_pwconv1_Conv_output_0" };
uint32_t dimensions__text_encoder_convnext_convnext_5_act_Mul_1_output_0[] = {1, 1024, 128};
Qnn_Tensor_t outputs__elementwiseneuron_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0231356061995029f, .offset= -7}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_14", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_14, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_14, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_14, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape inserting a dummy height axis ahead of pwconv2:
// {1, 1024, 128} -> {1, 1024, 1, 128}.
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d */
const char* inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d[] = { "_text_encoder_convnext_convnext_5_act_Mul_1_output_0" };
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0231356061995029f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// NCHW -> NHWC transpose (perm {0, 2, 3, 1}) so Conv2d sees channels-last:
// {1, 1024, 1, 128} -> {1, 1, 128, 1024}.
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0231356061995029f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static weight tensor of the pwconv2 pointwise convolution
// ({1, 1, 1024, 256}: in-channels 1024 -> out-channels 256), uint8 payload
// from the model binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight[] = {1, 1, 1024, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0102361282333732f, .offset= -106}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Registers the static bias tensor ({256}) of pwconv2, quantized uint8.
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030281692743301f, .offset= -110}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias)}}, .isDynamicDimensions=
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Adds the pwconv2 Conv2d node (stride 1x1, dilation 1x1, zero padding,
// group=1): NHWC input {1, 1, 128, 1024} x weight {1, 1, 1024, 256}
// -> {1, 1, 128, 256}.
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv2_Conv_2d */
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride[] = {2};
uint32_t _text_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_5_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_2d[] = { "_text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc",
"tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight", "tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias" };
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0479558855295181f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_convnext_convnext_5_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// NHWC -> NCHW transpose of the pwconv2 output (perm {0, 3, 1, 2}):
// {1, 1, 128, 256} -> {1, 256, 1, 128}.
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t
params__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = { "_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate" };
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0479558855295181f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape squeezing the dummy height axis: {1, 256, 1, 128} -> {1, 256, 128}.
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate */
const char* inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate[] = { "_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw" };
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0479558855295181f, .offset= -121}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose back to feature-last layout (perm {0, 2, 1}):
// {1, 256, 128} -> {1, 128, 256}. Note the perm buffer here is rank-3, so
// dataSize is 12 bytes (3 x uint32), unlike the 16-byte rank-4 perms above.
static ModelError_t addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = { "_text_encoder_convnext_convnext_5_pwconv2_Conv_output_0" };
uint32_t
dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0479558855295181f, .offset= -121}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static gamma vector ({1, 1, 256}) multiplied into the block
// output below — consistent with a ConvNeXt layer-scale parameter. Quantized
// uint8 with offset 0 (non-negative values only).
static ModelError_t addTensor_tts_ttl_text_encoder_convnext_convnext_5_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_convnext_convnext_5_gamma[] = {1, 1, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_convnext_convnext_5_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_convnext_convnext_5_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043508573435247f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_text_encoder_convnext_convnext_5_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_convnext_convnext_5_gamma), .dataSize=BINLEN(tts_ttl_text_encoder_convnext_convnext_5_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// ElementWiseBinary gamma x pwconv2-output (scalar operation code 13; the node
// name indicates a Mul — confirm against the ElementWiseBinary enum in
// QnnOpDef.h). gamma {1, 1, 256} broadcasts over {1, 128, 256}.
static ModelError_t addNode__text_encoder_convnext_convnext_5_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_Mul_2 */
Qnn_Param_t params__text_encoder_convnext_convnext_5_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_convnext_convnext_5_Mul_2[] = { "tts_ttl_text_encoder_convnext_convnext_5_gamma", "_text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__text_encoder_convnext_convnext_5_Mul_2_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0253028701990843f, .offset= -113}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_5_Mul_2, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_5_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary residual combine (scalar operation code 0; the node name
// indicates an Add): skip branch _Mul_output_0 + layer-scaled branch
// _Mul_2_output_0, both {1, 128, 256}.
static ModelError_t addNode__text_encoder_convnext_convnext_5_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_Add */
Qnn_Param_t params__text_encoder_convnext_convnext_5_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__text_encoder_convnext_convnext_5_Add[] = { "_text_encoder_convnext_convnext_5_Mul_output_0", "_text_encoder_convnext_convnext_5_Mul_2_output_0" };
uint32_t dimensions__text_encoder_convnext_convnext_5_Add_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_convnext_convnext_5_Add, // Node Params
1, // Num Node Params
inputs__text_encoder_convnext_convnext_5_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_convnext_convnext_5_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary (scalar operation code 13; node name indicates Mul) of the
// block output with the graph input "text_mask" — presumably zeroing padded
// sequence positions; confirm the mask/broadcast semantics against the
// converter's graph dump. (Definition continues past this chunk.)
static ModelError_t addNode__text_encoder_convnext_convnext_5_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_convnext_convnext_5_Mul_3 */
Qnn_Param_t params__text_encoder_convnext_convnext_5_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_convnext_convnext_5_Mul_3[] = { "_text_encoder_convnext_convnext_5_Add_output_0", "text_mask" };
uint32_t dimensions__text_encoder_convnext_convnext_5_Mul_3_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_convnext_convnext_5_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_convnext_convnext_5_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}}, .rank= 3, .dimensions=dimensions__text_encoder_convnext_convnext_5_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_convnext_convnext_5_Mul_3", // Node Name
"qti.aisw",
// Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_convnext_convnext_5_Mul_3, // Node Params 1, // Num Node Params inputs__text_encoder_convnext_convnext_5_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_convnext_convnext_5_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_Mul_1 */ Qnn_Param_t params__text_encoder_attn_encoder_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_attn_encoder_Mul_1[] = { "_text_encoder_convnext_convnext_5_Mul_3_output_0", "text_mask" }; uint32_t dimensions__text_encoder_attn_encoder_Mul_1_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_Mul_1, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_Mul_1, // Input Tensor 
Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_Mul_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_Mul_1_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_Mul_1_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_Mul_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_Mul_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_Mul_1_output_0_ncf[] = { "_text_encoder_attn_encoder_Mul_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_Mul_1_output_0_ncf[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_Mul_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_Mul_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_Mul_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_Mul_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_Mul_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_Mul_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output 
Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight[] = {1, 1, 256, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033146953210235f, .offset= -132}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0040646390989423f, .offset= -129}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 
0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0601664744317532f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d, // Output Tensors 1// 
Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0601664744317532f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate */ const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_output_0[] = {1, 4, 64, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_0_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0601664744317532f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_Mul_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
// NOTE(review): this file is generated by qnn-onnx-converter (export_format=cpp).
// Do not hand-edit tensor names, dims, or quantization scale/offset values —
// regenerate from the source ONNX model instead. Comments below are review aids.
//
// (continuation) "perm" parameter tensor of the NCHW->NHWC Transpose feeding
// conv_k: rank-1 uint32 tensor, 4 entries (dataSize = 4 * sizeof(uint32_t) = 16).
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}} };
// Transpose node: consumes the reshaped conv_k input and emits the NHWC view
// {1,1,128,256} expected by the 1x1 Conv2d below.
const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = {
  "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      // Activation quantization from min-max calibration (asymmetric uint8).
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1 // Num Output Tensors
                       ), err);
return err;
}

// Registers the static conv_k weight tensor: {1,1,256,256} uint8, data pulled
// from the binary blob via BINVARSTART/BINLEN. Layout is presumably HWIO (QNN
// Conv2d convention) — confirm against the converter output if editing.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight[] = {1, 1, 256, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0036138310097158f, .offset= -130}}},
        .rank= 4,
        .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight),
                       .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Registers the static conv_k bias tensor: 256 uint8 values from the binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name=
        "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000505998395965f, .offset= -125}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias),
                       .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// 1x1 Conv2d node for conv_k (attention key projection of an ONNX Conv1d that
// the converter lifted to 2d): dilation/stride {1,1}, zero padding, group=1.
// Note the generated naming convention: "dimensions_<param>" holds the tensor
// shape, while the identically-suffixed array without the prefix holds the data.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    // pad_amount: rank-2 {2,2} tensor, all zeros -> "valid" (no) padding.
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride,
         .memType=
         // (continuation of the "stride" parameter tensor: 2 uint32 -> dataSize 8)
         QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    // Scalar params: ungrouped convolution; sparse-index reuse disabled.
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Conv2d inputs follow QNN convention: activation, weight, bias.
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d[] = {
    "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight",
    "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0506854578852654f, .offset= -130}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose back NHWC -> NCHW (perm {0,3,1,2}) after the conv_k Conv2d.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = {
    "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate"
  };
  uint32_t
  // {1,1,128,256} NHWC -> {1,256,1,128} NCHW; quant encoding carried through
  // unchanged (Transpose is data-invariant).
  dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0506854578852654f, .offset= -130}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Reshape {1,256,1,128} -> {1,4,64,128}: splits the 256 channels into
// 4 heads x 64 dims (multi-head attention split — inferred from the 4/64
// factorization; confirm against the source model).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate[] = {
    "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw"
  };
  uint32_t
  dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0[] = {1, 4, 64, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0506854578852654f, .offset= -130}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// conv_v path mirrors conv_k above: reshape the shared NCF activation
// "_text_encoder_attn_encoder_Mul_1_output_0_ncf" to a rank-4 view for Conv2d.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d */
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d[] = {
    "_text_encoder_attn_encoder_Mul_1_output_0_ncf"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t
  // (continuation) output of the conv_v reshape-to-2d node.
  outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose NCHW -> NHWC (perm {0,2,3,1}) feeding the conv_v 1x1 Conv2d.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version=
       QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = {
    "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0308190658688545f, .offset= -97}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Registers the static conv_v weight tensor: {1,1,256,256} uint8 from the
// binary blob (same layout convention as conv_k above).
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight[] = {1, 1, 256, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0025421255268157f, .offset= -134}}},
        .rank= 4,
        .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight),
                       .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Registers the static conv_v bias tensor: 256 uint8 values.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias(QnnModel& model){
  ModelError_t err =
  MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0009569339454174f, .offset= -134}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias),
                       .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// 1x1 Conv2d node for conv_v (value projection), same parameterization as
// the conv_k Conv2d: dilation/stride {1,1}, zero padding, group=1.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     // (continuation) "dilation" tensor param: 2 uint32 values {1,1}.
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    // pad_amount: rank-2 {2,2} tensor, all zeros -> no padding.
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType=
         QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    // Scalar params: ungrouped convolution; sparse-index reuse disabled.
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Conv2d inputs: activation, weight, bias.
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d[] = {
    "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight",
    "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0187407676130533f, .offset= -141}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
        {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose back NHWC -> NCHW (perm {0,3,1,2}) after the conv_v Conv2d.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
         {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = {
    "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0187407676130533f, .offset= -141}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Reshape {1,256,1,128} -> {1,4,64,128}: multi-head split of conv_v output.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR
  _text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate[] = {
    "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0[] = {1, 4, 64, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0187407676130533f, .offset= -141}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose (perm {0,1,3,2}) of "_..._Reshape_output_0": swaps the last two
// axes -> {1,4,128,64} (head-dim to last axis, as used before a MatMul —
// presumably the attention-score computation; confirm in the source graph).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Transpose */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_perm[] = {4};
  uint32_t
  _text_encoder_attn_encoder_attn_layers_0_Transpose_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Transpose_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_Transpose[] = {
    "_text_encoder_attn_encoder_attn_layers_0_Reshape_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_output_0[] = {1, 4, 128, 64};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0601664744317532f, .offset= -124}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
        .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_Transpose", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_0_Transpose, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_Transpose, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_Transpose, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Same last-two-axes transpose (perm {0,1,3,2}) applied to the conv_v head
// split "_..._Reshape_2_output_0" -> {1,4,128,64}.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Transpose_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Transpose_1 */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_1_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_0_Transpose_1_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Transpose_1[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_0_Transpose_1_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
           QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_1_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Transpose_1_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_0_Transpose_1[] = {
    "_text_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0[] = {1, 4, 128, 64};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Transpose_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0187407676130533f, .offset= -141}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_0_Transpose_1", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_0_Transpose_1, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_0_Transpose_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_0_Transpose_1, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Registers the scalar constant "_..._Constant_10_output_0" (dims {1});
// definition continues past this chunk.
static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0[] = {1};
  VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0313725508749485f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Div: ElementWiseBinary with scalar "operation" code 2 (divide per QnnOpDef — the ONNX node name
 * "Div" corroborates). Divides the transposed query tensor by the Constant_10 scalar above. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Div(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;

/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Div */
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_Div[] = { "_text_encoder_attn_encoder_attn_layers_0_Transpose_output_0", "_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Div_output_0[] = {1, 4, 128, 64};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0075208093039691f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_0_Div", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_0_Div, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_0_Div, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_0_Div, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}

/* MatMul: scaled query (Div output, 1x4x128x64) x Reshape_1 output -> 1x4x128x128 attention
 * logits; both transpose flags false. (Head of the function — the output-tensor initializer
 * continues on the next chunk line.) */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;

/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_MatMul */
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_MatMul[] = { "_text_encoder_attn_encoder_attn_layers_0_Div_output_0", "_text_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_output_0[] = {1, 4, 128, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3933258354663849f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_0_MatMul", // Node Name
                       "qti.aisw", // Package Name
                       "MatMul", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_0_MatMul, // Node Params
                       2, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_0_MatMul, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_0_MatMul, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}

/* Registers the static 1x1x64x255 uint8 weight Transpose_3_output_0_nchw; its payload is linked
 * in from the binary blob via BINVARSTART/BINLEN. Consumed below as MatMul_1's second operand —
 * the 64x255 layout suggests a relative-position embedding table (verify against exporter). */
static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw[] = {1, 1, 64, 255};
VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028889917302877f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* MatMul_1: scaled query (1x4x128x64) x static 1x1x64x255 table -> 1x4x128x255 per-position
 * scores; transpose flags false. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;

/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_MatMul_1 */
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1[] = { "_text_encoder_attn_encoder_attn_layers_0_Div_output_0", "_text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0[] = {1, 4, 128, 255};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_0_MatMul_1", // Node Name
                       "qti.aisw", // Package Name
                       "MatMul", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_0_MatMul_1, // Node Params
                       2, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}

/* Layout shuffle NCHW->NHWC (perm 0,2,3,1): 1x4x128x255 -> 1x128x255x4; quantization unchanged. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;

/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = {1, 128, 255, 4};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}

/* Pad_1 (head): pads one column on axis 2 (255 -> 256) — first step of the pad/reshape/slice
 * "skew" sequence that realigns relative-position scores to absolute positions (inferred from
 * the shapes; verify against the exporter). Declaration continues on the next chunk line. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;

/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Pad_1 */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount[] = {4, 2};
uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount[] = {0, 0, 0, 0, 0, 1, 0, 0};
Qnn_Param_t
params__text_encoder_attn_encoder_attn_layers_0_Pad_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_1[] = { "_text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0[] = {1, 128, 256, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_1", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_1, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Pad_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = {1, 4, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_7(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Reshape_7 */ const char* 
inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0[] = {1, 4, 32768}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Reshape_7", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t 
params__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = {1, 32768, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Pad_2 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount[] = {3, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount[] = {0, 0, 0, 127, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Pad_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, 
.name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_2[] = { "_text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0[] = {1, 32895, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_2", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_2, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Pad_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = {1, 4, 32895}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 3, 
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_10(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Reshape_10 */ const char* inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_10[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0[] = {1, 4, 129, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Reshape_10", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_10, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_10, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode_Slice_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR Slice_0 */ uint32_t dimensions_Slice_0_ranges[] = {4, 3}; int32_t Slice_0_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 127, 255, 1}; Qnn_Param_t params_Slice_0[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "Slice_0_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions_Slice_0_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)Slice_0_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, 
{.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs_Slice_0[] = { "_text_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0" }; uint32_t dimensions__v_890[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs_Slice_0[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_v_890", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159389134496450f, .offset= -181}}}, .rank= 4, .dimensions=dimensions__v_890, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "Slice_0", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params_Slice_0, // Node Params 5, // Num Node Params inputs_Slice_0, // Input Tensor Names 1, // Num Input Tensor Names outputs_Slice_0, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Add_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Add_2 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Add_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Add_2[] = { "_text_encoder_attn_encoder_attn_layers_0_MatMul_output_0", "_v_890" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Add_2_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t 
outputs__text_encoder_attn_encoder_attn_layers_0_Add_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Add_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3970485031604767f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Add_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Add_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Add_2, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Add_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Add_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0[] = {1}; VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 39.2156867980957031f, .offset= -255}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Where */ const char* inputs__text_encoder_attn_encoder_attn_layers_0_Where[] = { "_text_encoder_attn_encoder_attn_layers_3_Cast_5_output_0", "_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", "_text_encoder_attn_encoder_attn_layers_0_Add_2_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Where_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 39.4126014709472656f, .offset= -254}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_text_encoder_attn_encoder_attn_layers_0_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Softmax */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Softmax[] = { "_text_encoder_attn_encoder_attn_layers_0_Where_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Softmax, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = { 
"_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = {1, 128, 128, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_MatMul_2 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_MatMul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, 
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_2[] = { "_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0", "_text_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_2_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_MatMul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186980962753296f, .offset= -141}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_MatMul_2", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_MatMul_2, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Pad_3 */ uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount[] = {4, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount[] = {0, 0, 0, 0, 0, 127, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Pad_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_3[] = { "_text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0[] = {1, 128, 255, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_3", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_3, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Pad_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = {1, 4, 128, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, // Output Tensors 
1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_13(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Reshape_13 */ const char* inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0[] = {1, 4, 32640}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Reshape_13", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = {1, 32640, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 
3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Pad_4 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount[] = {3, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount[] = {0, 0, 128, 0, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Pad_4[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_4[] = { "_text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0[] = {1, 32768, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_4", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_4, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_4, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__text_encoder_attn_encoder_attn_layers_0_Pad_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = {1, 4, 32768}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_16(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Reshape_16 */ const char* inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_16[] = { "_text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0[] = {1, 4, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_16[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_0_Reshape_16", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_16, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_16, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Slice_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Slice_8 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Slice_8_ranges[] = {4, 3}; int32_t _text_encoder_attn_encoder_attn_layers_0_Slice_8_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 1, 256, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Slice_8[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Slice_8_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Slice_8_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Slice_8_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; // all masks 0: ranges tensor alone defines the slice
const char* inputs__text_encoder_attn_encoder_attn_layers_0_Slice_8[] = { "_text_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Slice_8_output_0[] = {1, 4, 128, 255};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Slice_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Slice_8_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Slice_8_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_Slice_8", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_0_Slice_8, // Node Params
5, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_Slice_8, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_Slice_8, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] Static (frozen) tensor {1, 1, 255, 64}, uint8 asymmetric quant
   (scale 0.0039676..., offset -149), payload taken from the model binary blob via
   BINVARSTART/BINLEN. Consumed below as the second MatMul operand. */
static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw[] = {1, 1, 255, 64};
VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039676283486187f, .offset= -149}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err; }
/* [review] MatMul node: {1,4,128,255} x {1,1,255,64} -> {1,4,128,64}, no operand transposes
   (transpose_in0 = transpose_in1 = 0). The {1,1,...} leading dims of the static operand are
   presumably broadcast across the 4 heads per QNN MatMul semantics — confirm against the
   backend's MatMul op definition. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_MatMul_3 */
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_MatMul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_3[] = { "_text_encoder_attn_encoder_attn_layers_0_Slice_8_output_0", "_text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_3_output_0[] = {1, 4, 128, 64};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_MatMul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038471613079309f, .offset= -146}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_MatMul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_MatMul_3", // Node Name
"qti.aisw", // Package Name
"MatMul", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_0_MatMul_3, // Node Params
2, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_MatMul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_MatMul_3, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] ElementWiseBinary with operation=0 combining MatMul_2 and MatMul_3 outputs
   ({1,4,128,64} each). The node name says Add, so operation code 0 is presumably ADD —
   verify against QnnOpDef.h's ElementWiseBinary operation enum. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Add_4(QnnModel& model){
ModelError_t err =
MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Transpose_9 */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_9_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_0_Transpose_9_perm[] = {0, 1, 3, 2}; // swap last two axes: {1,4,128,64} -> {1,4,64,128}
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_Transpose_9[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Transpose_9_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_9_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_Transpose_9_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_Transpose_9[] = { "_text_encoder_attn_encoder_attn_layers_0_Add_4_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0[] = {1, 4, 64, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Transpose_9[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0187045671045780f, .offset= -141}}}, .rank= 4,
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_Transpose_9", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_0_Transpose_9, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_Transpose_9, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_Transpose_9, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] Reshape node: {1,4,64,128} -> {1,256,1,128}, producing the NCHW-shaped input the
   converter prepared for the 1x1 conv_o convolution below (same quant params as its input). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_19(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_Reshape_19 */
const char* inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_19[] = { "_text_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_19[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0187045671045780f, .offset= -141}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_Reshape_19", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_Reshape_19, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_Reshape_19, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] Layout Transpose: perm {0,2,3,1} converts the NCHW activation {1,256,1,128} to
   NHWC {1,1,128,256} for the channel-last Conv2d that follows. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0187045671045780f, .offset= -141}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] Static conv_o weight tensor {1, 1, 256, 256} (spatial 1x1), uint8 asymmetric quant
   (scale 0.0025607..., offset -150), payload from the model binary blob. Axis meaning follows
   the QNN Conv2d weight layout — confirm against the backend op definition before reuse. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight(QnnModel& model){
ModelError_t err =
MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight[] = {1, 1, 256, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025607591960579f, .offset= -150}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err; }
/* [review] Static conv_o bias tensor {256}, uint8 quantized (scale 0.0012910..., offset -144),
   payload from the model binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012910888763145f, .offset= -144}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err; }
/* [review] Conv2d node: pointwise (1x1) convolution on the NHWC activation {1,1,128,256} with
   dilation {1,1}, pad {0,0,0,0}, stride {1,1}, group 1 -> {1,1,128,256}. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride[] = {2};
uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, // group=1: ordinary (non-depthwise) convolution
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0176635794341564f, .offset= -110}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] Layout Transpose: perm {0,3,1,2} converts the conv output back from NHWC
   {1,1,128,256} to NCHW {1,256,1,128}; quant params unchanged. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0176635794341564f, .offset= -110}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] Reshape node: squeezes the NCHW conv result {1,256,1,128} to rank-3 {1,256,128}
   ("_conv_o_Conv_output_0"); quant params unchanged. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate */
const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0176635794341564f, .offset= -110}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] Transpose node: perm {0,2,1} flips {1,256,128} to feature-last {1,128,256} so the
   result can be added to the residual branch below; quant params unchanged. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0176635794341564f, .offset= -110}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* [review] ElementWiseBinary with operation=0 combining "_text_encoder_attn_encoder_Mul_1_output_0"
   with the conv_o output ({1,128,256} each); output feeds the norm_layers_1_0 LayerNorm below.
   Node name says Add, so operation code 0 is presumably ADD — verify against QnnOpDef.h. */
static ModelError_t addNode__text_encoder_attn_encoder_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_Add */
Qnn_Param_t params__text_encoder_attn_encoder_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_Add[] = { "_text_encoder_attn_encoder_Mul_1_output_0", "_text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc" };
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0320131517946720f, .offset= -102}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_attn_encoder_Add, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_Add, // Output Tensors
1// Num Output Tensors
),
err); return err; }
/* [review] Static LayerNorm scale (gamma) tensor {256}, uint8 quant (scale 0.0016559...,
   offset 0), payload from the model binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016559431096539f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err; }
/* [review] Static LayerNorm shift (beta) tensor {256}, uint8 quant (scale 0.0010400...,
   offset -137), payload from the model binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010400067549199f, .offset= -137}}},
.rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err; }
/* [review] LayerNormalization node (definition continues past this chunk): normalizes over
   axes={2} (the 256-wide feature axis of {1,128,256}); epsilon scalar follows below. */
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization */
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes[] = {1};
uint32_t _text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32,
{.floatValue = 0.000001000000f}}}} }; const char* inputs__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization[] = { "_text_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0", "tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight", "tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias" }; uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0085915494710207f, .offset= -70}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_ffn_layers_0_Mul */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_0_Mul[] = { "_text_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0", "text_mask" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0085915494710207f, .offset= -70}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_0_Mul, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_0_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_0_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = { "_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0085915494710207f, .offset= -70}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0085915494710207f, .offset= -70}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0085915494710207f, .offset= -70}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight[] = {1, 1, 256, 1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0063623008318245f, .offset= -147}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019895781297237f, .offset= -206}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= 
(Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight", "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0130557501688600f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0130557501688600f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate */ const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0130557501688600f, 
.offset= -170}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_Relu(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_Relu */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_Relu[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_0_Relu[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_Relu[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Relu_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043770535849035f, .offset= 0}}}, .rank= 3, 
// ---------------------------------------------------------------------------
// Auto-generated by qnn-onnx-converter (export_format=cpp). Each function below
// registers one tensor or one graph node of the text-encoder FFN block 0 with
// the QnnModel. DO NOT hand-edit constants: scales/offsets/dims are the
// quantization calibration results and must match the serialized weight blobs
// (BINVARSTART/BINLEN). Comments only were added in review.
// ---------------------------------------------------------------------------
// Tail of addNode__text_encoder_attn_encoder_ffn_layers_0_Relu (definition
// starts before this chunk): finishes the output tensor and registers the
// "ElementWiseNeuron" (ReLU) node.
.dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_Relu", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseNeuron", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_Relu, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_Relu, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_Relu, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,2,1} moves the ReLU output into feature-last layout
// (output dims {1,128,1024}) so it can be broadcast-multiplied with text_mask.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm[] = {0, 2, 1};
// "perm" is a static rank-1 uint32 tensor param (3 elements, 12 bytes).
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = { "_text_encoder_attn_encoder_ffn_layers_0_Relu_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = {1, 128, 1024};
// Output: quantized uint8 activation, scale/offset carried over from the ReLU output.
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043770535849035f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// ElementWiseBinary node with operation code 13 applied against "text_mask";
// node name indicates an elementwise multiply (masking the padded positions) —
// confirm 13 == MULTIPLY against QnnOpDef.h. Output keeps the input's scale/offset.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_Mul_1 */
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1[] = { "_text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc", "text_mask" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0[] = {1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043770535849035f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_Mul_1", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_Mul_1, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,2,1} restores channel-first layout
// ({1,128,1024} -> {1,1024,128}) ahead of the conv_2 reshape.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = { "_text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = {1, 1024, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043770535849035f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Reshape node: {1,1024,128} -> {1,1024,1,128}, inserting a height-1 axis so
// the original ONNX Conv1d can run as a QNN Conv2d. No params needed.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d */
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d[] = {1, 1024, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043770535849035f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,2,3,1} converts NCHW {1,1024,1,128} to NHWC
// {1,1,128,1024}, the layout the Conv2d node consumes.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043770535849035f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static conv_2 weight tensor {1,1,1024,256} (1x1 kernel, 1024 in / 256 out
// channels), uint8 with per-tensor scale/offset; data comes from the linked
// binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight[] = {1, 1, 1024, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0065387329086661f, .offset= -112}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Static conv_2 bias tensor {256}, quantized uint8 (bias_bitwidth=8),
// also sourced from the linked binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016064491355792f, .offset= -147}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Conv2d node: dilation {1,1}, pad_amount {0,0,0,0}, stride {1,1}, group 1 —
// i.e. a pointwise (1x1) convolution, NHWC in {1,1,128,1024} -> out {1,1,128,256}.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride[] = {2};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight", "tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225699990987778f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Conv2d", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d, // Node Params
                       5, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,3,1,2} converts the Conv2d NHWC output back to NCHW
// {1,256,1,128}; scale/offset identical to the conv output.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225699990987778f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Reshape node: drops the height-1 axis, {1,256,1,128} -> {1,256,128},
// producing the original Conv1d-shaped output.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate */
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225699990987778f, .offset= -114}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,2,1} puts the conv output feature-last
// ({1,256,128} -> {1,128,256}) for the second mask multiply.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225699990987778f, .offset= -114}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// ElementWiseBinary node, operation code 13 with "text_mask" (node name
// indicates multiply — re-masks the conv output); output {1,128,256} keeps the
// conv output's scale/offset.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_0_Mul_2 */
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_2[] = { "_text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc", "text_mask" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225699990987778f, .offset= -114}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_0_Mul_2", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_0_Mul_2, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_0_Mul_2, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_0_Mul_2, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// ElementWiseBinary node, operation code 0 (node name indicates add): the
// residual connection combining the pre-FFN branch
// (norm_layers_1_0_Transpose_1 output) with the FFN output.
static ModelError_t addNode__text_encoder_attn_encoder_Add_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_Add_1 */
Qnn_Param_t params__text_encoder_attn_encoder_Add_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_Add_1[] = { "_text_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0", "_text_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225205067545176f, .offset= -108}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_Add_1", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_attn_encoder_Add_1, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_Add_1, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_Add_1, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static LayerNorm gamma (weight) tensor {256}, quantized uint8,
// from the linked binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020990900229663f, .offset= -92}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Static LayerNorm beta (bias) tensor {256}, quantized uint8,
// from the linked binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025229139719158f, .offset= -156}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} );
return err;
}

// LayerNorm node: normalizes over axis 2 (the 256-wide feature axis of
// {1,128,256}) with epsilon 1e-6, applying the gamma/beta tensors above.
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization */
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes[] = {1};
uint32_t _text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization[] = { "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0", "tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight", "tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias" };
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization", // Node Name
                       "qti.aisw", // Package Name
                       "LayerNorm", // Qnn Node Type
                       params__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization, // Node Params
                       2, // Num Node Params
                       inputs__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node (perm {0,2,1}) taking the LayerNorm output back to
// channel-first {1,256,128}. NOTE(review): definition continues past this
// chunk — the remainder of the output tensor and the addNode call follow.
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf */
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = { "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 3,
.dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 4, 
// (continuation of addNode__..._conv_q_Conv_reshape_to_2d: output tensor tail + VALIDATE)
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Transpose {1,256,1,128} -> {1,1,128,256} with perm {0,2,3,1}: channel-last layout
// expected by the Conv2d node that follows.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc */
  // "perm" param payload: 4 uint32 elements {0,2,3,1} (dataSize 16 bytes).
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Static conv_q weight tensor: 1x1 conv kernel {1,1,256,256}, uint8 quantized
// (scale 0.0051692309789360, offset -127); data comes from the embedded binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight[] = {1, 1, 256, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0051692309789360f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}
// Static conv_q bias tensor: 256 elements, uint8 quantized (scale 0.0054281954653561,
// offset -127; bias_bitwidth=8 per converter command line); data from the binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0054281954653561f, .offset= -127}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}
// Conv2d for the attention "q" projection: 1x1 kernel, stride {1,1}, dilation {1,1},
// zero padding, group=1 — i.e. a pointwise 256->256 projection over the 128 positions.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
// (continuation of addNode__..._conv_q_Conv_2d: remaining param tensors, I/O, VALIDATE)
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    // pad_amount: rank-2 {2,2} tensor, all zeros (no padding on either spatial axis)
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    // stride: {1,1}
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate[] = {1, 1, 128, 256};
  // Output re-quantized to scale 0.0270125567913055, offset -132 ({1,1,128,256}).
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270125567913055f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Transpose {1,1,128,256} -> {1,256,1,128} with perm {0,3,1,2}: undoes the channel-last
// permutation applied before the Conv2d.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270125567913055f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Reshape {1,256,1,128} -> {1,4,64,128}: splits the 256 channels into 4 heads x 64 dims
// for the multi-head attention that follows (definition continues on the next block).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_output_0[] = {1, 4, 64, 128};
  Qnn_Tensor_t
// (continuation of addNode__..._conv_q_Conv_intermediate: output tensor + VALIDATE)
outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270125567913055f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Same 1-D -> 2-D lowering as conv_q, now for the "k" projection: Reshape
// {1,256,128} -> {1,256,1,128}. Input is the shared normalized activation.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d */
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Transpose {1,256,1,128} -> {1,1,128,256} with perm {0,2,3,1} ahead of the conv_k Conv2d.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Static conv_k weight tensor: {1,1,256,256}, uint8 quantized (scale 0.0042594773694873,
// offset -118); data from the embedded binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight[] = {1, 1, 256, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042594773694873f, .offset= -118}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}
// Static conv_k bias tensor, 256 elements (definition continues on the next block).
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias[] = {256};
  // (continuation of addTensor_..._conv_k_bias) uint8 quantized, scale 0.0000508762786922,
  // offset -127; data from the embedded binary blob.
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000508762786922f, .offset= -127}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}
// Conv2d for the attention "k" projection: 1x1 kernel, stride {1,1}, dilation {1,1},
// zero padding, group=1 — pointwise 256->256 projection, mirroring conv_q above.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate[] = {1, 1, 128, 256};
  // Output re-quantized to scale 0.0107978666201234, offset -124 ({1,1,128,256}).
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0107978666201234f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
// Transpose back to {1,256,1,128} with perm {0,3,1,2} after the conv_k Conv2d
// (definition continues past this point in the file).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0107978666201234f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate */ const char* 
inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate[] = {
"_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw"
};
// Output tensor: 1x4x64x128, same quantization as the conv_k result
// (Reshape is layout-only).
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0[] = {1, 4, 64, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0107978666201234f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshapes the normalized encoder activation (..._norm_layers_2_0_..._ncf)
// into a rank-4 NCHW view (1x256x1x128) so the conv_v (value) projection can
// run as a Conv2d. Quantization (scale=0.0055575, offset=-112) is that of the
// incoming activation.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d */
const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d[] = {
"_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transposes the conv_v input from NCHW (1x256x1x128) to NHWC (1x1x128x256)
// with perm {0, 2, 3, 1}, matching the layout Conv2d expects; quantization is
// carried through unchanged.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = {
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055574835278094f, .offset= -112}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static conv_v (value projection) weight: 1x1x256x256
// UFIXED_POINT_8, per-tensor scale/offset quantization; payload from the
// model binary (BINVARSTART/BINLEN).
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight[] = {1, 1, 256, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036650204565376f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Registers the static conv_v bias: rank-1 {256} UFIXED_POINT_8 tensor,
// scale/offset quantized; payload from the model binary.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010743860621005f, .offset= -122}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Adds the Conv2d node for the conv_v (value) projection — same 1x1 conv
// pattern as conv_k above: stride {1,1}, dilation {1,1}, no padding, group=1.
// Definition continues in the next chunk.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride[] = {2};
uint32_t
_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d[] = {
// dilation = {1, 1}
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// pad_amount = {{0, 0}, {0, 0}} (no padding)
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// stride = {1, 1}
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// group = 1 (dense conv), reuse_sparse_indices = false
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d[] = {
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc",
"tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight",
"tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias"
};
// Output: 1x1x128x256 UFIXED_POINT_8, scale=0.0143752 / offset=-132.
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143751781433821f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transposes the conv_v result from NHWC (1x1x128x256) back to NCHW
// (1x256x1x128) with perm {0, 3, 1, 2}; quantization unchanged.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = {
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143751781433821f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshapes the NCHW conv_v result (1x256x1x128) to 1x4x64x128 as
// _..._Reshape_2_output_0 — mirrors the conv_k reshape above (presumably
// 4 heads x 64 dims; TODO confirm against the ONNX graph).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate */
const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate[] = {
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0[] = {1, 4, 64, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143751781433821f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate, // Output Tensors
1// Num Output Tensors
),
err);
return err;
}

// Transposes the query tensor _..._Reshape_output_0 (produced upstream) with
// perm {0, 1, 3, 2}, swapping the last two axes to 1x4x128x64
// (head-major, sequence x head-dim). Output scale=0.0270126 / offset=-132.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Transpose(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Transpose */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_1_Transpose_perm[] = {0, 1, 3, 2};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Transpose[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Transpose_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_1_Transpose[] = {
"_text_encoder_attn_encoder_attn_layers_1_Reshape_output_0"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_output_0[] = {1, 4, 128, 64};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Transpose[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270125567913055f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_Transpose", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_1_Transpose, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_Transpose, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_Transpose, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transposes the value tensor _..._Reshape_2_output_0 (from the conv_v path)
// with perm {0, 1, 3, 2} to 1x4x128x64; quantization carried through
// (scale=0.0143752 / offset=-132).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Transpose_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Transpose_1 */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_1_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_1_Transpose_1_perm[] = {0, 1, 3, 2};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Transpose_1[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Transpose_1_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_1_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Transpose_1_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_1_Transpose_1[] = {
"_text_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0[] = {1, 4, 128, 64};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Transpose_1[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143751781433821f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_attn_layers_1_Transpose_1", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_1_Transpose_1, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_attn_layers_1_Transpose_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_attn_layers_1_Transpose_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the ElementWiseBinary node for the ONNX Div (query scaling).
// Definition continues in the next chunk.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Div(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE
FOR _text_encoder_attn_encoder_attn_layers_1_Div */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Div[] = { "_text_encoder_attn_encoder_attn_layers_1_Transpose_output_0", "_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Div_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033765695989132f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Div, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Adds a MatMul node: Div output (1,4,128,64) x Reshape_1 output -> (1,4,128,128) uint8 tensor (scale/offset quantized). Names suggest this is the attention-score matmul of encoder attention layer 1 -- inferred from tensor names, not verifiable here. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE
FOR _text_encoder_attn_encoder_attn_layers_1_MatMul */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_MatMul[] = { "_text_encoder_attn_encoder_attn_layers_1_Div_output_0", "_text_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0130080049857497f, .offset= -119}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_MatMul, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_MatMul, // Output Tensors 1// Num Output Tensors ), err); return
err; } /* Registers a STATIC uint8 tensor (1,1,64,255) whose payload comes from the external weights blob (BINVARSTART/BINLEN symbols resolved at link time). */ static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw[] = {1, 1, 64, 255}; VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0069775325246155f, .offset= -118}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* MatMul: Div output (1,4,128,64) x the static tensor above (1,1,64,255), broadcast on dim 1 -> (1,4,128,255). */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_MatMul_1 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1[] = { "_text_encoder_attn_encoder_attn_layers_1_Div_output_0",
"_text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0[] = {1, 4, 128, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_MatMul_1, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose with perm {0,2,3,1}: (1,4,128,255) -> (1,128,255,4); output keeps the producer's scale/offset since Transpose is value-preserving. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t
params__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm, /* 4 x uint32 = 16 bytes */ .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = {1, 128, 255, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Pad (scheme=0, constant 0.0): one trailing zero on axis 2, (1,128,255,4) -> (1,128,256,4). pad_amount rows are per-axis {before, after} pairs. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Pad_1 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount[] = {4, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount[] = {0, 0, 0, 0, 0, 1, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR,
.name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_1[] = { "_text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0[] = {1, 128, 256, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_1", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_1, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_1, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose back to NCHW with perm {0,3,1,2}: (1,128,256,4) -> (1,4,128,256). */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = {1, 4, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 4,
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Parameter-less Reshape: flattens (1,4,128,256) -> (1,4,32768); element count preserved (128*256 = 32768). */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_7(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Reshape_7 */ const char* inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7[] = { "_text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0[] = {1, 4, 32768}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Reshape_7", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose with perm {0,2,1}: (1,4,32768) -> (1,32768,4), moving the flattened axis in front of channels. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm, /* 3 x uint32 = 12 bytes */ .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = {1, 32768, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Pad (scheme=0, constant 0.0): 127 trailing zeros on axis 1, (1,32768,4) -> (1,32895,4); 32895 = 129*255 so the following Reshape_10 can re-block the padded sequence. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Pad_2 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount[] =
{3, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount[] = {0, 0, 0, 127, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_2[] = { "_text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0[] = {1, 32895, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f,
.offset= -56}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_2", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_2, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_2, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose with perm {0,2,1}: (1,32895,4) -> (1,4,32895), restoring channels-first layout. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = {1, 4, 32895}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Parameter-less Reshape: (1,4,32895) -> (1,4,129,255); 129*255 = 32895. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_10(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Reshape_10 */ const char* inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_10[] = { "_text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0[] = {1, 4, 129, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Reshape_10", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_10, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_10, // Output Tensors 1// Num Output Tensors ), err); return err; } /* StridedSlice over (1,4,129,255): ranges rows are {begin,end,stride} per axis -- keeps rows [0,128) and cols [127,255) -> (1,4,128,128). Together with the preceding pad/reshape chain this looks like a relative-position skew trick -- NOTE(review): inferred from the op pattern, confirm against the exporter. */ static ModelError_t addNode_Slice_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR Slice_1 */ uint32_t dimensions_Slice_1_ranges[] = {4, 3}; int32_t Slice_1_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 127, 255, 1}; Qnn_Param_t params_Slice_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "Slice_1_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions_Slice_1_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)Slice_1_ranges, /* 12 x int32 = 48 bytes */ .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs_Slice_1[] = { "_text_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0" }; uint32_t dimensions__v_895[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs_Slice_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_v_895", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0263884905725718f, .offset= -56}}}, .rank= 4, .dimensions=dimensions__v_895, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "Slice_1", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params_Slice_1, // Node Params 5, // Num Node Params inputs_Slice_1, // Input Tensor Names 1, // Num Input Tensor Names outputs_Slice_1, // Output Tensors 1// Num Output Tensors ), err); return err; } /* ElementWiseBinary with operation=0 (add): MatMul scores + sliced tensor _v_895, both (1,4,128,128) -> (1,4,128,128). */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Add_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Add_2 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Add_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Add_2[] = { "_text_encoder_attn_encoder_attn_layers_1_MatMul_output_0", "_v_895" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Add_2_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Add_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Add_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0351645871996880f, .offset= -66}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Add_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Add_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type
params__text_encoder_attn_encoder_attn_layers_1_Add_2, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Add_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Add_2, // Output Tensors 1// Num Output Tensors ), err); return err; } /* ElementWiseSelect(condition, then, else): selects the Constant_88 fill value where the Cast_5 condition holds, otherwise the Add_2 scores -- the large output scale (39.24) with offset -255 suggests a large-negative mask fill, but that depends on the constant's contents, which are not visible here. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Where */ const char* inputs__text_encoder_attn_encoder_attn_layers_1_Where[] = { "_text_encoder_attn_encoder_attn_layers_3_Cast_5_output_0", "_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", "_text_encoder_attn_encoder_attn_layers_1_Add_2_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Where_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 39.2416915893554688f, .offset= -255}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Where, // Input Tensor Names
3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Softmax over axis 3 with beta=1.0; output scale 1/256 with offset 0 matches a [0,1) probability range. */ static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Softmax */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Softmax[] = { "_text_encoder_attn_encoder_attn_layers_1_Where_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Softmax, // Node Params 2, //
Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = {1, 128, 128, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = { (Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_MatMul_2 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_MatMul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_2[] = { "_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0", 
"_text_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0142002431675792f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_MatMul_2", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_MatMul_2, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Pad_3 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount[] = {4, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount[] = {0, 0, 0, 0, 0, 127, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, 
.name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_3[] = { "_text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0[] = {1, 128, 255, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= 
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_3", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_3, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, 
.isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = {1, 4, 128, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_13(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Reshape_13 */ const char* inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13[] = { 
"_text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0[] = {1, 4, 32640}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Reshape_13", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = {1, 32640, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Pad_4 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount[] = {3, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount[] = {0, 0, 128, 0, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_4[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) 
{QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_4[] = { "_text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0[] = {1, 32768, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_4", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_4, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf */ uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = {1, 4, 32768}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= 
{ .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_16(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Reshape_16 */ const char* inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_16[] = { "_text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0[] = {1, 4, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_16[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 
0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Reshape_16", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_16, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_16, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Slice_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Slice_8 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Slice_8_ranges[] = {4, 3}; int32_t _text_encoder_attn_encoder_attn_layers_1_Slice_8_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 1, 256, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Slice_8[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Slice_8_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Slice_8_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Slice_8_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", 
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_1_Slice_8[] = { "_text_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Slice_8_output_0[] = {1, 4, 128, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Slice_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_1_Slice_8_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Slice_8_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_1_Slice_8", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_1_Slice_8, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_1_Slice_8, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_1_Slice_8, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* Registers the static (frozen) tensor "_text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw":
   asymmetric uint8 constant, shape {1, 1, 255, 64}, payload read from the model
   binary blob via BINVARSTART/BINLEN.
   NOTE(review): the leading "static ModelError_t" of this definition sits on the
   preceding line, outside this span. */
addTensor__text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw[] = {1, 1, 255, 64};
  VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "_text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 // Per-tensor scale/offset quantization (uint8).
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0052806804887950f, .offset= -132}}},
                                 .rank= 4,
                                 .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw),
                                                .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds MatMul node "_text_encoder_attn_encoder_attn_layers_1_MatMul_3":
   (Slice_8 output) x (static Unsqueeze_41 tensor above) -> {1, 4, 128, 64};
   neither operand is transposed (transpose_in0 = transpose_in1 = false). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_MatMul_3 */
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_MatMul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="transpose_in0",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="transpose_in1",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_3[] = {
    "_text_encoder_attn_encoder_attn_layers_1_Slice_8_output_0",
    "_text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0[] = {1, 4, 128, 64};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_3[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0033798634540290f, .offset= -134}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_MatMul_3", // Node Name
                         "qti.aisw", // Package Name
                         "MatMul", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_MatMul_3, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_MatMul_3, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_MatMul_3, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Adds ElementWiseBinary node "_text_encoder_attn_encoder_attn_layers_1_Add_4"
   (operation=0; presumably Add, matching the ONNX node name — confirm against QnnOpDef):
   MatMul_2 output + MatMul_3 output -> {1, 4, 128, 64}. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Add_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Add_4 */
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Add_4[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_Add_4[] = {
    "_text_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0",
    "_text_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Add_4_output_0[] = {1, 4, 128, 64};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Add_4[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_1_Add_4_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0143287880346179f, .offset= -133}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Add_4_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_Add_4", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_Add_4, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_Add_4, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_Add_4, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Adds Transpose node "_text_encoder_attn_encoder_attn_layers_1_Transpose_9" with
   perm {0, 1, 3, 2} (swap last two axes). Definition continues on the next line. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Transpose_9(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Transpose_9 */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_9_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_Transpose_9_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_Transpose_9[]
/* Continuation of addNode__text_encoder_attn_encoder_attn_layers_1_Transpose_9:
   perm parameter tensor {0, 1, 3, 2}; input is the Add_4 output, output {1, 4, 64, 128}. */
= {
  {.paramType=QNN_PARAMTYPE_TENSOR,
   .name="perm",
   {.tensorParam=(Qnn_Tensor_t) {
     .version= QNN_TENSOR_VERSION_2,
     {.v2= {
       .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_1_Transpose_9_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                          QNN_QUANTIZATION_ENCODING_UNDEFINED,
                          {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_9_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       // 4 x uint32 permutation values = 16 bytes.
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_Transpose_9_perm, .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
};
const char* inputs__text_encoder_attn_encoder_attn_layers_1_Transpose_9[] = {
  "_text_encoder_attn_encoder_attn_layers_1_Add_4_output_0"
};
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0[] = {1, 4, 64, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Transpose_9[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0143287880346179f, .offset= -133}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_1_Transpose_9", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_1_Transpose_9, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_1_Transpose_9, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_1_Transpose_9, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}

/* Adds parameterless Reshape node "_text_encoder_attn_encoder_attn_layers_1_Reshape_19":
   {1, 4, 64, 128} -> {1, 256, 1, 128}, preparing the activation as a 2d layout for the
   following conv_o Conv2d. Quantization encoding is carried over unchanged. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_19(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_Reshape_19 */
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_19[] = {
    "_text_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_19[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0143287880346179f, .offset= -133}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_Reshape_19", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_Reshape_19, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_Reshape_19, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Adds Transpose node "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc"
   with perm {0, 2, 3, 1}: NCHW {1, 256, 1, 128} -> NHWC {1, 1, 128, 256} layout for Conv2d.
   Definition continues on the next line. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {
    "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t
/* Continuation of addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc:
   output tensor {1, 1, 128, 256} and the Transpose node registration. */
outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0143287880346179f, .offset= -133}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}

/* Registers the static conv_o weight tensor {1, 1, 256, 256}, uint8 quantized,
   payload from the model binary via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight[] = {1, 1, 256, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0035392281133682f, .offset= -124}}},
                                 .rank= 4,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the static conv_o bias tensor {256}, uint8 quantized,
   payload from the model binary via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0011419841321185f, .offset= -125}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds Conv2d node "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d":
   1x1 conv (stride {1, 1}, dilation {1, 1}, zero padding, group=1) over the NHWC
   activation, using the weight/bias tensors registered above.
   Definition continues on the next line. */
static ModelError_t
addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType=
/* Continuation of addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d:
   rest of the pad_amount tensor param, then stride/group/reuse_sparse_indices params,
   the three inputs (NHWC activation, weight, bias), the {1, 1, 128, 256} output, and
   the Conv2d node registration. */
QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d[] = {
    "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight",
    "tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0153647204861045f, .offset= -147}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Adds Transpose node "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw"
   with perm {0, 3, 1, 2}: NHWC {1, 1, 128, 256} -> NCHW {1, 256, 1, 128}.
   Definition continues on the next line. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t
  params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {
    "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0153647204861045f, .offset= -147}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
/* Continuation of addNode__…_conv_o_Conv_intermediate_nchw: close out the output
   tensor and register the Transpose node. */
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Adds parameterless Reshape node "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate":
   drops the singleton spatial axis, {1, 256, 1, 128} -> {1, 256, 128}. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate[] = {
    "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0153647204861045f, .offset= -147}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Adds Transpose node "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc"
   with rank-3 perm {0, 2, 1}: {1, 256, 128} -> {1, 128, 256} (feature-last layout). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 3 x uint32 permutation values = 12 bytes.
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {
    "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0153647204861045f, .offset= -147}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Adds ElementWiseBinary node "_text_encoder_attn_encoder_Add_2" (operation=0;
   presumably Add, matching the ONNX node name — confirm against QnnOpDef).
   Definition continues on the next line. */
static ModelError_t addNode__text_encoder_attn_encoder_Add_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_Add_2 */
  Qnn_Param_t params__text_encoder_attn_encoder_Add_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
/* Continuation of addNode__text_encoder_attn_encoder_Add_2: residual add of the
   previous block's output and conv_o's output, result {1, 128, 256}. */
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__text_encoder_attn_encoder_Add_2[] = {
    "_text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0",
    "_text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc"
  };
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0160207170993090f, .offset= -141}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_Add_2", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_attn_encoder_Add_2, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_Add_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_Add_2, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Registers the static LayerNorm gamma (scale) tensor {256}, uint8 quantized,
   payload from the model binary via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0017968829488382f, .offset= -94}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the static LayerNorm beta (bias) tensor {256}, uint8 quantized,
   payload from the model binary via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0020343782380223f, .offset= -122}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                 .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds LayerNorm node "_text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization":
   normalizes over axis 2 (the 256-wide feature axis) with epsilon 1e-6, using the
   gamma/beta tensors registered above. Definition continues on the next line. */
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization */
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes[] = {1};
  uint32_t _text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // Single uint32 axis value = 4 bytes.
         {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes, .dataSize=4}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization[] = {
    "_text_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0",
    "tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight",
    "tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias"
  };
  uint32_t
dimensions__text_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042351065203547f, .offset= -103}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_Mul */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_1_Mul[] = { 
"_text_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0",
    "text_mask"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0[] = {1, 128, 256};
  /* Output keeps the producer's quantization (scale/offset identical to the LayerNorm
   * output) since masking only zeroes elements. */
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042351065203547f, .offset= -103}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_Mul, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose [1,128,256] -> [1,256,128] (perm {0,2,1}): moves features to the channel
 * axis ("ncf" = channel-first layout) ahead of the 1x1-conv implementation of the FFN. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = {
    /* "perm": 3-element static UINT_32 tensor, dataSize = 3 * 4 = 12 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_Mul_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042351065203547f, .offset= -103}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape [1,256,128] -> [1,256,1,128]: inserts a singleton height axis so the 1-D
 * conv can be executed as a Conv2d (the converter's standard Conv1d lowering). */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d */
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042351065203547f, .offset= -103}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose NCHW -> NHWC (perm {0,2,3,1}): [1,256,1,128] -> [1,1,128,256], the spatial-
 * first layout QNN's Conv2d expects (perform_axes_to_spatial_first_order=True). */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = {
    /* "perm": 4-element static UINT_32 tensor, dataSize = 4 * 4 = 16 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = {1, 1,
128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042351065203547f, .offset= -103}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Registers the static conv_1 weight tensor [1, 1, 256, 1024] (HWIO: 1x1 kernel,
 * 256 in-channels, 1024 out-channels), quantized uint8 (scale ~0.0049455, offset -119).
 * Payload bytes come from the external weight blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight[] = {1, 1, 256, 1024};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0049455128610134f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the static conv_1 bias tensor [1024], quantized uint8
 * (scale ~0.00081329, offset -193); payload from the external weight blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias[] = {1024};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008132941438816f, .offset= -193}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

/* Conv2d lowering of the original 1-D conv: 1x1 kernel, stride 1, no padding. */
static ModelError_t
addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d */
  /* dilation {1,1}, pad_amount {{0,0},{0,0}}, stride {1,1}: a plain 1x1 convolution. */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d[] = {
    /* "dilation": 2-element static UINT_32 tensor, dataSize = 8 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    /* "pad_amount": 2x2 static UINT_32 tensor (before/after per spatial dim), 16 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    /* "stride": 2-element static UINT_32 tensor, 8 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    /* "group"=1: no grouped convolution */
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  /* Inputs: NHWC activation, then the static weight and bias tensors. */
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight",
    "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias"
  };
  uint32_t
dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate[] = {1, 1, 128, 1024};
  /* Conv output in NHWC: [1, 1, 128, 1024], uint8 (scale ~0.0119786, offset -192). */
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119786281138659f, .offset= -192}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose NHWC -> NCHW (perm {0,3,1,2}): [1,1,128,1024] -> [1,1024,1,128], undoing
 * the spatial-first layout after the Conv2d. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t
params__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = {
    /* "perm": 4-element static UINT_32 tensor, dataSize = 16 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = {1, 1024, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119786281138659f, .offset= -192}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape [1,1024,1,128] -> [1,1024,128]: drops the singleton height axis inserted for
 * the Conv2d lowering, yielding the Conv's rank-3 output tensor. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0[] = {1, 1024, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119786281138659f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Activation after conv_1. NOTE(review): ElementWiseNeuron with operation=4 presumably
 * selects ReLU (the source ONNX op was Relu; output offset 0 matches a non-negative
 * range) — confirm against the ElementWiseNeuron operation enum in QnnOpDef.h. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_Relu(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_Relu */
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_Relu[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_Relu[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0[] = {1, 1024, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_Relu[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Relu_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029400507919490f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_Relu", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseNeuron", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_Relu, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_Relu, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_Relu, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose [1,1024,128] -> [1,128,1024] (perm {0,2,1}): back to feature-last ("nfc")
 * layout so text_mask [1,1,128] can broadcast over the next elementwise multiply. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = {
    /* "perm": 3-element static UINT_32 tensor, dataSize = 12 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_Relu_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = {1, 128, 1024};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029400507919490f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Second mask multiply: re-applies text_mask after the ReLU activation.
 * NOTE(review): operation=13 presumably selects Multiply (source ONNX op was Mul);
 * confirm against the ElementWiseBinary operation enum in QnnOpDef.h. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_Mul_1 */
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1[] = {
    "_text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc",
"text_mask"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0[] = {1, 128, 1024};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029400507919490f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_1_Mul_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_1_Mul_1, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose [1,128,1024] -> [1,1024,128] (perm {0,2,1}): channel-first layout ahead of
 * the conv_2 1x1-conv lowering. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = {
    /* "perm": 3-element static UINT_32 tensor, dataSize = 12 bytes */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = { "_text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029400507919490f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf", // Node Name "qti.aisw", // 
Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029400507919490f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose (1, 1024, 1, 128) -> (1, 1, 128, 1024) with perm {0, 2, 3, 1}: channel-first to
 * channel-last layout ("nhwc") ahead of the Conv2d. Quant params pass through unchanged. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029400507919490f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* Static Conv2d filter, dims {1, 1, 1024, 256} (1x1 kernel; presumably in=1024, out=256 — matches
 * the surrounding activations). uint8 scale/offset quantized; payload loaded from the binary
 * blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight[] = {1, 1, 1024, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044963476248085f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* Static Conv2d bias, 256 entries, uint8 scale/offset quantized, loaded from the binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012102284235880f, .offset= -115}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

static ModelError_t
addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d */
/* Conv2d with stride {1,1}, dilation {1,1}, zero pad_amount, group=1: with the 1x1 filter
 * above this maps the (1,1,128,1024) NHWC activation to (1,1,128,256). */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation[] = {2};
uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride[] = {2};
uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
/* Inputs: NHWC activation, static weight, static bias (declared by the addTensor_* helpers above). */
const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d[] = { "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight", "tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias" };
uint32_t
dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037361390423030f, .offset= -142}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Conv2d", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d, // Node Params
                       5, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* Transpose (1, 1, 128, 256) back to channel-first (1, 256, 1, 128) with perm {0, 3, 1, 2}. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t
params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037361390423030f, .offset= -142}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* Reshape (1, 256, 1, 128) -> (1, 256, 128): drops the unit spatial axis added for the Conv2d,
 * producing the conv's logical 1-D output _..._conv_2_Conv_output_0. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate */
const char* inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate[] = { "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037361390423030f, .offset= -142}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* Transpose (1, 256, 128) -> (1, 128, 256) with perm {0, 2, 1}: back to sequence-major
 * ("nfc") layout for the elementwise mask multiply that follows. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc */
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm[] = {3};
uint32_t _text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char*
inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = { "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037361390423030f, .offset= -142}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* ElementWiseBinary with operation=13 — a multiply, per the source node name _Mul_2: applies
 * the graph input "text_mask" to the conv output (1, 128, 256). Output keeps the conv
 * tensor's quant params (scale 0.003736.., offset -142). */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_1_Mul_2 */
Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_1_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_2[] = { "_text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc", "text_mask" };
uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037361390423030f, .offset= -142}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_ffn_layers_1_Mul_2", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_attn_encoder_ffn_layers_1_Mul_2, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_ffn_layers_1_Mul_2, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_ffn_layers_1_Mul_2, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* ElementWiseBinary with operation=0 — an add, per the source node name Add_3 (FFN residual). */
static ModelError_t addNode__text_encoder_attn_encoder_Add_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_Add_3 */
Qnn_Param_t params__text_encoder_attn_encoder_Add_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
/* Residual add: sums the previous block's output with the masked FFN output; the result feeds
 * the LayerNorm below (its quant encoding scale 0.005166.., offset -116 differs from both inputs). */
const char* inputs__text_encoder_attn_encoder_Add_3[] = { "_text_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0", "_text_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0051663992926478f, .offset= -116}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_Add_3", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_attn_encoder_Add_3, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_Add_3, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_Add_3, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* Static LayerNorm gamma (scale weight), 256 entries, uint8 quantized, from the binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016388839576393f, .offset= -65}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* Static LayerNorm beta (bias), 256 entries, uint8 quantized, from the binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016850014217198f, .offset= -145}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* LayerNorm over axis 2 (the 256-wide feature axis of the (1,128,256) input), epsilon 1e-6,
 * with the gamma/beta tensors registered above. */
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization */
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes[] = {1};
uint32_t _text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization[] = { "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0", "tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight", "tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias" };
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0[] = {1,
128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization", // Node Name
                       "qti.aisw", // Package Name
                       "LayerNorm", // Qnn Node Type
                       params__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization, // Node Params
                       2, // Num Node Params
                       inputs__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* Transpose (1, 128, 256) -> (1, 256, 128) with perm {0, 2, 1}: feature-major ("ncf") layout
 * for the attention-layer conv_q projection that follows. */
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf */
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf[] = { "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf[] = {1, 256, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

/* Reshape (1, 256, 128) -> (1, 256, 1, 128): unit spatial axis for the conv_q Conv2d,
 * mirroring the reshape/transpose pattern used for the FFN conv above. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d */
const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight[] = {1, 1, 256, 256}; 
  /* Register the conv_q weight tensor; scale 0.0054630..., offset -132 (uint8 asymmetric). */
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0054630157537758f, .offset= -132}}},
      .rank= 4,
      .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight),
                     .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
    ), err);
  return err;
}

/* Static conv_q bias for attention layer 2: 256 elements, uint8 quantized
 * (scale 0.0042106..., offset -134); bytes resolved from the model binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0042106732726097f, .offset= -134}}},
      .rank= 1,
      .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias),
                     .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
    ), err);
  return err;
}

/* Query projection as a 1x1 Conv2d (attention layer 2): stride {1,1}, pad {0,0,0,0},
 * dilation {1,1}, group 1. NHWC input [1,1,128,256] + weight + bias -> output [1,1,128,256],
 * requantized to scale 0.0241206..., offset -131. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_dilation",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_dilation,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_dilation,
                      .dataSize=8}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_pad_amount",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 2,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_pad_amount,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_pad_amount,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_stride",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_stride,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d_stride,
                      .dataSize=8}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d[] = {
      "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc",
      "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight",
      "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0241206865757704f, .offset= -131}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d", // Node Name
      "qti.aisw", // Package Name
      "Conv2d", // Qnn Node Type
      params__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d, // Node Params
      5, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d, // Input Tensor Names
      3, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* NHWC -> NCHW transpose of the conv_q output (perm {0,3,1,2}: [1,1,128,256] -> [1,256,1,128]). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR
_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw_perm,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw[] = {
      "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0241206865757704f, .offset= -131}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw", // Node Name
      "qti.aisw", // Package Name
      "Transpose", // Qnn Node Type
      params__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw, // Node Params
      1, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* Head-split reshape of the query projection: [1,256,1,128] -> [1,4,64,128]
 * (presumably 4 attention heads x 64 channels - confirm against the source model). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate[] = {
      "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_output_0[] = {1, 4, 64, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0241206865757704f, .offset= -131}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate", // Node Name
      "qti.aisw", // Package Name
      "Reshape", // Qnn Node Type
      nullptr, // Node Params
      0, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* Key-projection preprocessing: same [1,256,128] -> [1,256,1,128] rank-4 reshape as conv_q,
 * reading the shared normalized NCF activations. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d[] = {
      "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d", // Node Name
      "qti.aisw", // Package Name
      "Reshape", // Qnn Node Type
      nullptr, // Node Params
      0, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* NCHW -> NHWC transpose (perm {0,2,3,1}) ahead of the conv_k Conv2d; mirrors the conv_q path. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= {
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc_perm,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc[] = {
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc", // Node Name
      "qti.aisw", // Package Name
      "Transpose", // Qnn Node Type
      params__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc, // Node Params
      1, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* Static 1x1 conv_k weights: shape {1,1,256,256} (QNN Conv2d weight layout - presumably HWIO;
 * confirm against QNN OpDef docs), uint8 scale/offset quantized, bytes from the model blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight[] = {1, 1, 256, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0045181335881352f, .offset= -131}}},
      .rank= 4,
      .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight),
                     .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
    ), err);
  return err;
}

/* Static conv_k bias: 256 elements, uint8 quantized; bytes from the model blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0000512079677719f, .offset= -127}}},
      .rank= 1,
      .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias),
                     .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
    ), err);
  return err;
}

/* Key projection as a 1x1 Conv2d: stride {1,1}, pad {0,0,0,0}, dilation {1,1}, group 1;
 * mirrors the conv_q Conv2d node above. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_dilation",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_dilation,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_dilation,
                      .dataSize=8}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_pad_amount",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 2,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_pad_amount,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_pad_amount,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_stride",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_stride,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d_stride,
                      .dataSize=8}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced=
0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d[] = {
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc",
      "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight",
      "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0100196059793234f, .offset= -127}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d", // Node Name
      "qti.aisw", // Package Name
      "Conv2d", // Qnn Node Type
      params__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d, // Node Params
      5, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d, // Input Tensor Names
      3, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* NHWC -> NCHW transpose of the conv_k output (perm {0,3,1,2}: [1,1,128,256] -> [1,256,1,128]). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1,
       .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw_perm,
                      .dataSize=16}},
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw[] = {
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0100196059793234f, .offset= -127}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw", // Node Name
      "qti.aisw", // Package Name
      "Transpose", // Qnn Node Type
      params__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw, // Node Params
      1, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* Head-split reshape of the key projection: [1,256,1,128] -> [1,4,64,128]
 * (presumably 4 attention heads x 64 channels - confirm against the source model). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate[] = {
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_1_output_0[] = {1, 4, 64, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_1_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0100196059793234f, .offset= -127}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_1_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate", // Node Name
      "qti.aisw", // Package Name
      "Reshape", // Qnn Node Type
      nullptr, // Node Params
      0, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* Value-projection preprocessing: same [1,256,128] -> [1,256,1,128] rank-4 reshape as conv_q/conv_k,
 * reading the shared normalized NCF activations. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d[] = {
      "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}},
      .rank= 4,
      .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                       .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d", // Node Name
      "qti.aisw", // Package Name
      "Reshape", // Qnn Node Type
      nullptr, // Node Params
      0, // Num Node Params
      inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d, // Output Tensors
      1// Num Output Tensors
      ), err);
  return err;
}

/* NCHW -> NHWC transpose ahead of the conv_v Conv2d (continues past this chunk). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
       .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077934199944139f, .offset= -113}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
params__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight[] = {1, 1, 256, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042618629522622f, .offset= -136}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias", // Tensor Name (Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009470850345679f, .offset= -118}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083792172372341f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083792172372341f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate */ const char* 
inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_2_output_0[] = {1, 4, 64, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083792172372341f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Transpose */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_2_Transpose_perm[] = {0, 1, 3, 2}; Qnn_Param_t 
params__text_encoder_attn_encoder_attn_layers_2_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Transpose_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_Transpose[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0241206865757704f, .offset= -131}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_Transpose", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_Transpose, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_Transpose, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_Transpose, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Transpose_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Transpose_1 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_1_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_2_Transpose_1_perm[] = {0, 1, 3, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Transpose_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Transpose_1_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_1_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Transpose_1_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_Transpose_1[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_2_output_0" }; uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_1_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Transpose_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083792172372341f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_Transpose_1", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_Transpose_1, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_Transpose_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_Transpose_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Div(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Div */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_Div[] = { "_text_encoder_attn_encoder_attn_layers_2_Transpose_output_0", 
"_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Div_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030150858219713f, .offset= -131}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_Div, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_MatMul */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 
0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_MatMul[] = { "_text_encoder_attn_encoder_attn_layers_2_Div_output_0", "_text_encoder_attn_encoder_attn_layers_2_Reshape_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126462196931243f, .offset= -118}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_MatMul, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw[] = {1, 1, 64, 255}; VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw", // Tensor Name 
/* [doc] Auto-generated by qnn-onnx-converter (see file header): graph structure,
 * tensor names, shapes and quantization encodings below are converter output —
 * regenerate rather than hand-edit.
 *
 * Tail of a static-tensor registration that begins before this chunk: constant
 * uint8 tensor "_text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw"
 * with asymmetric scale/offset encoding; payload comes from the linked weight
 * blob via BINVARSTART/BINLEN. */
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0062393224798143f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* [doc] Adds the attention-score MatMul (neither operand transposed):
 * Div output x the static Transpose_3 tensor above ->
 * "_MatMul_1_output_0" {1,4,128,255}, uint8 (scale/offset from calibration). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_MatMul_1 */
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1[] = { "_text_encoder_attn_encoder_attn_layers_2_Div_output_0", "_text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0[] = {1, 4, 128, 255};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_MatMul_1", // Node Name
                         "qti.aisw", // Package Name
                         "MatMul", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_MatMul_1, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Layout shuffle: Transpose {1,4,128,255} -> {1,128,255,4} with
 * perm {0,2,3,1} (NCHW -> NHWC), quantization params carried through. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc[] = {1, 128, 255, 4};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Pad: adds 1 trailing element on axis 2 (255 -> 256) with constant 0
 * (scheme 0); pad_amount is an {axis x (before,after)} table. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_1 */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_pad_amount[] = {4, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_1_pad_amount[] = {0, 0, 0, 0, 0, 1, 0, 0};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_1_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_1_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_1[] = { "_text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc" };
  uint32_t
/* [doc] (continuation) Pad_1 output {1,128,256,4} and node registration;
 * generated code — see converter command line in the file header. */
dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0[] = {1, 128, 256, 4};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Pad_1", // Node Name
                       "qti.aisw", // Package Name
                       "Pad", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_Pad_1, // Node Params
                       3, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Pad_1, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Pad_1, // Output Tensors
                       1 // Num Output Tensors
                       ), err);
return err;
}

/* [doc] Layout shuffle back: Transpose {1,128,256,4} -> {1,4,128,256} with
 * perm {0,3,1,2} (NHWC -> NCHW). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw[] = {1, 4, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Reshape: flattens {1,4,128,256} -> {1,4,32768} (128*256 = 32768);
 * parameterless node. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_7(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Reshape_7 */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0[] = {1, 4, 32768};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Reshape_7", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Transpose {1,4,32768} -> {1,32768,4} with perm {0,2,1}
 * (channel-last "nfc" layout for the following Pad). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc[] = {1, 32768, 4};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
/* [doc] (continuation) output tensor of the Reshape_7 nfc Transpose, then its
 * node registration; generated code — regenerate via the converter. */
"_text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc, // Output Tensors
                       1 // Num Output Tensors
                       ), err);
return err;
}

/* [doc] Pad: adds 127 trailing elements on axis 1 (32768 -> 32895) with
 * constant 0 (scheme 0). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_2 */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_pad_amount[] = {3, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_2_pad_amount[] = {0, 0, 0, 127, 0, 0};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_2_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_2_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_2[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0[] = {1, 32895, 4};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Pad_2", // Node Name
                         "qti.aisw", // Package Name
                         "Pad", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_Pad_2, // Node Params
                         3, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Pad_2, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Pad_2, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Transpose {1,32895,4} -> {1,4,32895} with perm {0,2,1}
 * (back to channel-first "ncf" layout). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf[] = {1, 4, 32895};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Reshape: {1,4,32895} -> {1,4,129,255} (129*255 = 32895); parameterless. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Reshape_10 */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_10[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_10_output_0[] = {1, 4, 129, 255};
  Qnn_Tensor_t
/* [doc] (continuation) Reshape_10 output {1,4,129,255} and node registration. */
outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Reshape_10", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_10, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_10, // Output Tensors
                       1 // Num Output Tensors
                       ), err);
return err;
}

/* [doc] StridedSlice: ranges are {begin,end,stride} triplets per axis —
 * full axes 0..2 on dims 1/4/128, and [127:255:1] on the last axis,
 * yielding "_v_900" {1,4,128,128}; all masks 0. */
static ModelError_t addNode_Slice_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR Slice_2 */
  uint32_t dimensions_Slice_2_ranges[] = {4, 3};
  int32_t Slice_2_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 127, 255, 1};
  Qnn_Param_t params_Slice_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "Slice_2_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions_Slice_2_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)Slice_2_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
  const char* inputs_Slice_2[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_10_output_0" };
  uint32_t dimensions__v_900[] = {1, 4, 128, 128};
  Qnn_Tensor_t outputs_Slice_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_v_900", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0328931398689747f, .offset= -52}}}, .rank= 4, .dimensions=dimensions__v_900, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "Slice_2", // Node Name
                         "qti.aisw", // Package Name
                         "StridedSlice", // Qnn Node Type
                         params_Slice_2, // Node Params
                         5, // Num Node Params
                         inputs_Slice_2, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs_Slice_2, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] ElementWiseBinary with operation=0 (the converter's encoding for the
 * ONNX Add): MatMul output + "_v_900" -> {1,4,128,128}. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Add_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Add_2 */
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Add_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Add_2[] = { "_text_encoder_attn_encoder_attn_layers_2_MatMul_output_0", "_v_900" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Add_2_output_0[] = {1, 4, 128, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Add_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Add_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350487641990185f, .offset= -42}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Add_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Add_2", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_Add_2, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Add_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Add_2, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] ElementWiseSelect (ONNX Where): inputs are (condition from the
 * layers_3 Cast, value-if-true constant from layers_0, value-if-false =
 * Add_2 output) -> {1,4,128,128}. Large output scale (39.24, offset -255)
 * indicates the masked-fill value dominates the range — attention masking. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Where(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Where */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Where[] = { "_text_encoder_attn_encoder_attn_layers_3_Cast_5_output_0", "_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", "_text_encoder_attn_encoder_attn_layers_2_Add_2_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Where_output_0[] = {1, 4, 128, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 39.2422409057617188f, .offset= -255}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Where", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseSelect", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Where, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Where, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Softmax over the last axis (axis=3), beta=1.0; output scale is
 * exactly 1/256 with offset 0 (fixed encoding for a [0,1) softmax). */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Softmax(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Softmax */
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Softmax[] = { "_text_encoder_attn_encoder_attn_layers_2_Where_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0[] = {1, 4, 128, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Softmax", // Node Name
                         "qti.aisw", // Package Name
                         "Softmax", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_Softmax, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Softmax, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Softmax, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

static ModelError_t
/* [doc] Transpose {1,4,128,128} -> {1,128,128,4}, perm {0,2,3,1}
 * (NCHW -> NHWC) on the Softmax output; quantization carried through. */
addNode__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc[] = {1, 128, 128, 4};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Attention-value MatMul (no transposed operands): Softmax output x
 * Transpose_1 output -> "_MatMul_2_output_0" {1,4,128,64}, uint8. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_MatMul_2 */
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_MatMul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_2[] = { "_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0", "_text_encoder_attn_encoder_attn_layers_2_Transpose_1_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_2_output_0[] = {1, 4, 128, 64};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_MatMul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0081261992454529f, .offset= -117}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_MatMul_2", // Node Name
                         "qti.aisw", // Package Name
                         "MatMul", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_2_MatMul_2, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_2, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* [doc] Pad (cut off at the end of this chunk; registration continues below):
 * adds 127 trailing elements on axis 2 (128 -> 255) of the NHWC Softmax
 * output, constant 0, scheme 0 -> {1,128,255,4}. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_3 */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_pad_amount[] = {4, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_3_pad_amount[] = {0, 0, 0, 0, 0, 127, 0, 0};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_3_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_3_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_3[] = { "_text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0[] = {1, 128, 255, 4};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_2_Pad_3", // Node Name
"qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_Pad_3, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_Pad_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_Pad_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw[] = {1, 4, 128, 255}; 
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_13(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Reshape_13 */ const char* inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0[] = {1, 4, 32640}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_Reshape_13", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc[] = {1, 32640, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc, // Node Params 1, // Num Node Params 
// (continuation) remaining addNode arguments for the Reshape_13_output_0_nfc
// Transpose node above.
                       inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Pad node: prepends 128 zero entries on axis 1, [1,32640,4] -> [1,32768,4];
// pad_amount is a static [3,2] uint32 tensor (6 values -> dataSize 24 bytes).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_4 */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_pad_amount[] = {3, 2};
uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_4_pad_amount[] = {0, 0, 128, 0, 0, 0};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_4[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_4_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_4_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_4[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0[] = {1, 32768, 4};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Pad_4", // Node Name
                       "qti.aisw", // Package Name
                       "Pad", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_Pad_4, // Node Params
                       3, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Pad_4, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Pad_4, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,2,1} ("ncf" order per node name), [1,32768,4] ->
// [1,4,32768].
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf_perm[] = {3};
uint32_t _text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf[] = {1, 4, 32768};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Reshape node: splits the flat axis back into two, [1,4,32768] ->
// [1,4,128,256] (128 * 256 = 32768); no params.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_16(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Reshape_16 */
const char* inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_16[] = { "_text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_16_output_0[] = {1, 4, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_16[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Reshape_16_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Reshape_16_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Reshape_16", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_16, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_16, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// StridedSlice node: "ranges" is a static [4,3] int32 tensor of
// {begin, end, stride} per axis -> takes [:, :, :, 1:256] of the reshaped
// tensor, [1,4,128,256] -> [1,4,128,255]; all masks are 0 (12 x int32 ->
// dataSize 48 bytes).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Slice_8(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Slice_8 */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Slice_8_ranges[] = {4, 3};
int32_t _text_encoder_attn_encoder_attn_layers_2_Slice_8_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 1, 256, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Slice_8[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Slice_8_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Slice_8_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Slice_8_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char*
// (continuation) input/output tensors and addNode call for the Slice_8
// StridedSlice node above.
inputs__text_encoder_attn_encoder_attn_layers_2_Slice_8[] = { "_text_encoder_attn_encoder_attn_layers_2_Reshape_16_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Slice_8_output_0[] = {1, 4, 128, 255};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Slice_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Slice_8_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Slice_8_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Slice_8", // Node Name
                       "qti.aisw", // Package Name
                       "StridedSlice", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_Slice_8, // Node Params
                       5, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Slice_8, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Slice_8, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static tensor: quantized weights read from the binary blob via
// BINVARSTART/BINLEN, [1,1,255,64] uint8 (scale ~5.24e-3, offset -124);
// consumed as the second operand of MatMul_3 below.
static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw[] = {1, 1, 255, 64};
VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052390014752746f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                         ), err);
return err;
}

// MatMul node: [1,4,128,255] x [1,1,255,64] -> [1,4,128,64] (the [1,1,...]
// static operand is presumably broadcast over the 4 head-like slices - TODO
// confirm against backend MatMul broadcasting rules); no operand transposes.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_MatMul_3 */
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_MatMul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_3[] = { "_text_encoder_attn_encoder_attn_layers_2_Slice_8_output_0", "_text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_3_output_0[] = {1, 4, 128, 64};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_MatMul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034844279289246f, .offset= -117}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_MatMul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_MatMul_3", // Node Name
                       "qti.aisw", // Package Name
                       "MatMul", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_MatMul_3, // Node Params
                       2, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_MatMul_3, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_MatMul_3, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// ElementWiseBinary node combining the two MatMul outputs, both
// [1,4,128,64]; "operation" = 0 (the source node name indicates Add).
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Add_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Add_4 */
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Add_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_2_Add_4[] = { "_text_encoder_attn_encoder_attn_layers_2_MatMul_2_output_0", "_text_encoder_attn_encoder_attn_layers_2_MatMul_3_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Add_4_output_0[] = {1, 4, 128, 64};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Add_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Add_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079501606523991f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Add_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Add_4", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_Add_4, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Add_4, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Add_4, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,1,3,2} swaps the two trailing axes of the Add_4
// output ([1,4,128,64] -> [1,4,64,128]); static rank-1 perm tensor below.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Transpose_9(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Transpose_9 */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_9_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_2_Transpose_9_perm[] = {0, 1, 3, 2};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_Transpose_9[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Transpose_9_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
// (continuation) remainder of the Transpose_9 perm tensor, its I/O tensors,
// and the addNode call.
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_9_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_Transpose_9_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_2_Transpose_9[] = { "_text_encoder_attn_encoder_attn_layers_2_Add_4_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_9_output_0[] = {1, 4, 64, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Transpose_9[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_Transpose_9_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079501606523991f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_Transpose_9_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Transpose_9", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_Transpose_9, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Transpose_9, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Transpose_9, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Reshape node: merges the head axes for the output projection conv,
// [1,4,64,128] -> [1,256,1,128]; output tensor is named
// "..._conv_o_Conv_reshape_to_2d"; no params.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_19(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_Reshape_19 */
const char* inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_19[] = { "_text_encoder_attn_encoder_attn_layers_2_Transpose_9_output_0" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d[] = {1, 256, 1, 128};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_19[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079501606523991f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_Reshape_19", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_Reshape_19, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_Reshape_19, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: perm {0,2,3,1} puts the projection input into channel-last
// order, [1,256,1,128] -> [1,1,128,256].
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d" };
uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079501606523991f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static tensor: conv_o projection weights, [1,1,256,256] uint8
// (scale ~4.05e-3, offset -142). (Definition continues in the next chunk.)
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight[] = {1, 1, 256, 256};
VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040457658469677f, .offset= -142}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014776001917198f, .offset= -119}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_dilation[] = {1, 1}; uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181071590632200f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw_perm", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181071590632200f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers a Reshape that drops the singleton height axis:
 * [1, 256, 1, 128] -> [1, 256, 128], yielding the conv_o output in NCF layout. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181071590632200f, .offset= -121}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers a rank-3 Transpose (perm {0, 2, 1}): [1, 256, 128] -> [1, 128, 256],
 * i.e. channels-last (NFC) ordering for the residual add below. */
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181071590632200f, .offset= -121}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc, // Node Params
    1, // Num Node Params
    inputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers an ElementWiseBinary node (operation code 0; per the source node name
 * this is the ONNX Add) combining the pre-attention branch with the conv_o
 * projection output — i.e. the residual connection. Output shape [1, 128, 256]. */
static ModelError_t addNode__text_encoder_attn_encoder_Add_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_Add_4 */
  Qnn_Param_t params__text_encoder_attn_encoder_Add_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
  const char* inputs__text_encoder_attn_encoder_Add_4[] = { "_text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0", "_text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc" };
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_2_Transpose_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_2_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181310530751944f, .offset= -121}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_2_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_Add_4", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__text_encoder_attn_encoder_Add_4, // Node Params
    1, // Num Node Params
    inputs__text_encoder_attn_encoder_Add_4, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_Add_4, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers the static, 8-bit-quantized LayerNorm gamma (scale) vector, shape [256],
 * sourced from the binary blob section. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015434514498338f, .offset= -21}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* Registers the static, 8-bit-quantized LayerNorm beta (bias) vector, shape [256]. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026713011320680f, .offset= -113}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* Registers a LayerNorm node over axis 2 (the 256-wide feature axis) with
 * epsilon 1e-6, applying the quantized gamma/beta tensors registered above. */
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization */
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization_axes[] = {1};
  uint32_t _text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
  const char* inputs__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization[] = { "_text_encoder_attn_encoder_norm_layers_1_2_Transpose_output_0", "tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight", "tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias" };
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_2_Transpose_1_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_text_encoder_attn_encoder_norm_layers_1_2_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074344347231090f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_2_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization", // Node Name
    "qti.aisw", // Package Name
    "LayerNorm", // Qnn Node Type
    params__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization, // Node Params
    2, // Num Node Params
    inputs__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers an ElementWiseBinary node (operation code 13; per the source node name
 * this is the ONNX Mul) multiplying the LayerNorm output by the graph input
 * "text_mask" — masking out padded sequence positions before the FFN. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_Mul */
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_Mul[] = { "_text_encoder_attn_encoder_norm_layers_1_2_Transpose_1_output_0", "text_mask" };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043406523764133f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_ffn_layers_2_Mul", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__text_encoder_attn_encoder_ffn_layers_2_Mul, // Node Params
    1, // Num Node Params
    inputs__text_encoder_attn_encoder_ffn_layers_2_Mul, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_ffn_layers_2_Mul, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers a rank-3 Transpose (perm {0, 2, 1}): [1, 128, 256] -> [1, 256, 128],
 * moving features back to channel-first (NCF) for the FFN conv_1 convolution. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf[] = { "_text_encoder_attn_encoder_ffn_layers_2_Mul_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043406523764133f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf, // Node Params
    1, // Num Node Params
    inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers a Reshape reinserting a singleton height axis:
 * [1, 256, 128] -> [1, 256, 1, 128], preparing the FFN input for conv_1 as Conv2d. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d */
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf" };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043406523764133f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers a layout Transpose (perm {0, 2, 3, 1}, NCHW -> NHWC):
 * [1, 256, 1, 128] -> [1, 1, 128, 256], channel-last input for the FFN conv_1. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d" };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043406523764133f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Registers the static, 8-bit-quantized FFN conv_1 weight tensor,
 * shape [1, 1, 256, 1024] (1x1 kernel expanding 256 -> 1024 channels).
 * NOTE: this function continues beyond the current view. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight[] = {1, 1, 256, 1024};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055005843751132f, .offset= -142}}}, .rank= 4,
.dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009947851067409f, .offset= -173}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_dilation[] 
= {2}; uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_pad_amount, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d[] = { "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight", "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0129429167136550f, .offset= -198}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw_perm", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0129429167136550f, .offset= -198}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name 
"Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate */ const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate[] = { "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0129429167136550f, .offset= -198}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node 
Params inputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_Relu(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_Relu */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_Relu[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_2_Relu[] = { "_text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_Relu[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_Relu_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028976013418287f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_Relu", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_2_Relu, // Node Params 1, // Num Node Params 
inputs__text_encoder_attn_encoder_ffn_layers_2_Relu, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_Relu, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc[] = { "_text_encoder_attn_encoder_ffn_layers_2_Relu_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc[] = {1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028976013418287f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_Mul_1 */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1[] = { "_text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc", "text_mask" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0[] = {1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028976013418287f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_2_Mul_1, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf[] = { "_text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028976013418287f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf, // Input Tensor Names 
1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028976013418287f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), 
err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc", 
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028976013418287f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight[] = {1, 1, 1024, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0091291349381208f, .offset= -130}}}, .rank= 4, 
// NOTE(review): machine-generated by qnn-onnx-converter (see file header). Do not hand-edit logic;
// regenerate from the ONNX model instead. Comments below only label what each generated helper builds.
// (Tail of addTensor_..._ffn_layers_2_conv_2_weight — definition starts above this chunk.)
.dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight)}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Static u8 (asymmetric scale/offset) bias tensor, 256 elements, for ffn_layers.2.conv_2.
// Payload lives in the companion binary blob (BINVARSTART/BINLEN).
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0012725603301078f, .offset= -110}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Conv2d node for ffn_layers.2.conv_2 (1x1 conv lowered from ONNX Conv): stride 1x1, dilation 1x1,
// no padding, group=1. Consumes the NHWC-reshaped activation plus the static weight/bias tensors and
// produces the u8 intermediate (1,1,128,256).
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_dilation",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_dilation,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_dilation, .dataSize=8}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_pad_amount",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 2,
          .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_pad_amount,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_pad_amount, .dataSize=16}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_stride",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_stride,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d_stride, .dataSize=8}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d[] = {
    "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight",
    "tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0040946360677481f, .offset= -111}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose NHWC (1,1,128,256) -> NCHW (1,256,1,128) with perm {0,3,1,2}; undoes the layout change
// the converter applied around the Conv2d above. Quant params are passed through unchanged.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw_perm",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw_perm,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw_perm, .dataSize=16}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw[] = {
    "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0040946360677481f, .offset= -111}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape (1,256,1,128) -> (1,256,128): drops the dummy H dimension that was introduced to run the
// 1-D conv as Conv2d, recovering the original ONNX Conv output shape.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate[] = {
    "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0040946360677481f, .offset= -111}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose (1,256,128) -> (1,128,256) with perm {0,2,1}: channel-last ("nfc") layout so the
// following elementwise multiply can broadcast against text_mask (1,1,128 -> see graph inputs).
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc_perm",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc_perm,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc_perm, .dataSize=12}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc[] = {
    "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0040946360677481f, .offset= -111}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary node masking the FFN output with the graph input "text_mask".
// operation code 13 is multiply, judging by the ONNX node name (Mul_2) — see QnnOpDef.h.
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_2_Mul_2 */
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_2_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_2[] = {
    "_text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc",
    "text_mask"
  };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_2_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_ffn_layers_2_Mul_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0040946360677481f, .offset= -111}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_2_Mul_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_ffn_layers_2_Mul_2", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_attn_encoder_ffn_layers_2_Mul_2, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_ffn_layers_2_Mul_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_ffn_layers_2_Mul_2, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary add (operation 0): sums the previous norm-layer stream and the masked FFN output —
// presumably the residual connection of this encoder layer (TODO confirm against the ONNX graph).
static ModelError_t addNode__text_encoder_attn_encoder_Add_5(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_Add_5 */
  Qnn_Param_t params__text_encoder_attn_encoder_Add_5[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__text_encoder_attn_encoder_Add_5[] = {
    "_text_encoder_attn_encoder_norm_layers_1_2_Transpose_1_output_0",
    "_text_encoder_attn_encoder_ffn_layers_2_Mul_2_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add_5[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0074344347231090f, .offset= -170}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_Add_5", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__text_encoder_attn_encoder_Add_5, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_Add_5, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_Add_5, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Static u8 LayerNorm gamma (scale) tensor, 256 elements, for norm_layers.2.2.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0017707579536363f, .offset= -99}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Static u8 LayerNorm beta (bias) tensor, 256 elements, for norm_layers.2.2.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0025352207012475f, .offset= -108}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// LayerNorm over axis 2 (the 256-wide channel axis of the (1,128,256) input), epsilon 1e-6,
// using the two static gamma/beta tensors above.
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization */
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization_axes[] = {1};
  uint32_t _text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization_axes",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization_axes,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization_axes, .dataSize=4}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization[] = {
    "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_output_0",
    "tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight",
    "tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias"
  };
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization", // Node Name
                         "qti.aisw", // Package Name
                         "LayerNorm", // Qnn Node Type
                         params__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization, // Node Params
                         2, // Num Node Params
                         inputs__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose (1,128,256) -> (1,256,128) with perm {0,2,1}: back to channel-first ("ncf") layout for
// the next attention-layer convolutions.
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf */
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf_perm",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf_perm,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf_perm, .dataSize=12}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf[] = {
    "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0"
  };
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}},
        .rank= 3,
        .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape (1,256,128) -> (1,256,1,128): re-inserts the dummy H dimension so the attention layer's
// query projection (conv_q) can run as Conv2d.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d */
  const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d[] = {
    "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose NCHW (1,256,1,128) -> NHWC (1,1,128,256) with perm {0,2,3,1}, preparing the conv_q input
// layout for the Conv2d that follows.
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc_perm",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc_perm,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc[] = {
    "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d"
  };
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}},
        .rank= 4,
        .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Static u8 weight tensor (1,1,256,256) for the attention layer's query projection (conv_q),
// i.e. a 1x1 conv kernel in HWIO layout; payload from the model binary blob.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight[] = {1, 1, 256, 256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0065272985957563f, .offset= -114}}},
                                 .rank= 4,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Static u8 bias tensor, 256 elements, for conv_q.
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0057472842745483f, .offset= -128}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias),
                                                .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Conv2d node for conv_q: stride 1x1, dilation 1x1, no padding, group=1.
// (Definition continues past this chunk — truncated mid-initializer below.)
static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d */
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_dilation[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_dilation",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_dilation,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_dilation, .dataSize=8}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_pad_amount",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 2,
          .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_pad_amount,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_pad_amount, .dataSize=16}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_stride",
          .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                             QNN_QUANTIZATION_ENCODING_UNDEFINED,
                             {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1,
          .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_stride,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d_stride, .dataSize=8}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 
0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250326264649630f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0250326264649630f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_output_0[] = {1, 4, 64, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250326264649630f, 
.offset= -123}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output 
Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight[] = {1, 1, 256, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052782143466175f, .offset= -140}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0000505371062900f, .offset= -128}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 
0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0109326168894768f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d, // Output Tensors 1// 
Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0109326168894768f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_1_output_0[] = {1, 4, 64, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_3_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0109326168894768f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0122814215719700f, .offset= -172}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
params__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight[] = {1, 1, 256, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036392181646079f, .offset= -113}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias", // Tensor Name (Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012555452995002f, .offset= -127}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0076424423605204f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0076424423605204f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate */ const char* 
inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_2_output_0[] = {1, 4, 64, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0076424423605204f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Transpose */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Transpose_perm[] = {0, 1, 3, 2}; Qnn_Param_t 
params__text_encoder_attn_encoder_attn_layers_3_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Transpose_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Transpose[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250326264649630f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Transpose", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Transpose, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Transpose, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Transpose, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Transpose_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Transpose_1 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_1_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Transpose_1_perm[] = {0, 1, 3, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Transpose_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Transpose_1_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_1_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Transpose_1_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Transpose_1[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_2_output_0" }; uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_1_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Transpose_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0076424423605204f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Transpose_1", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Transpose_1, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Transpose_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Transpose_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Div(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Div */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Div[] = { "_text_encoder_attn_encoder_attn_layers_3_Transpose_output_0", 
"_text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Div_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0031290783081204f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Div, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_MatMul */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 
0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_MatMul[] = { "_text_encoder_attn_encoder_attn_layers_3_Div_output_0", "_text_encoder_attn_encoder_attn_layers_3_Reshape_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0115016419440508f, .offset= -96}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_MatMul, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw[] = {1, 1, 64, 255}; VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw", // Tensor Name 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0086542498320341f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_MatMul_1 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1[] = { "_text_encoder_attn_encoder_attn_layers_3_Div_output_0", "_text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0[] = {1, 4, 128, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0", .type= 
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_MatMul_1, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc[] = {1, 128, 255, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc, // Node Params 1, // Num Node Params 
inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_1 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_pad_amount[] = {4, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_1_pad_amount[] = {0, 0, 0, 0, 0, 1, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_1_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_1_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_1[] = { "_text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc" }; uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0[] = {1, 128, 256, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Pad_1", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_1, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
/* Transpose node: converts the padded tensor from NHWC {1,128,256,4} back to NCHW {1,4,128,256}
 * using perm = {0,3,1,2} (rank-1 static uint32 param, 4 elements -> dataSize=16 bytes).
 * Quantization encoding (scale/offset) is carried through unchanged, as expected for a
 * data-movement-only op. */
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw[] = {1, 4, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw", // 
/* Reshape_7: flattens the last two NCHW axes, {1,4,128,256} -> {1,4,32768} (128*256 = 32768).
 * Reshape takes no params (nullptr / 0). */
Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_7(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Reshape_7 */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0[] = {1, 4, 32768}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Reshape_7", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7, // Input 
/* Reshape_7_output_0_nfc: Transpose {1,4,32768} -> {1,32768,4} with perm = {0,2,1}
 * (rank-1 static uint32 param, 3 elements -> dataSize=12 bytes). Moves the flattened
 * feature axis inner so the following Pad_2 can pad along it. */
Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc[] = {1, 32768, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_2 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_pad_amount[] = {3, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_2_pad_amount[] = {0, 0, 0, 127, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_2_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
/* Pad_2: constant-pads axis 1 of {1,32768,4} by 127 trailing elements -> {1,32895,4}
 * (3x2 pad_amount, 6 uint32 elements -> dataSize=24 bytes; 32768 + 127 = 32895 = 129*255,
 * which the later Reshape_10 exploits). The subsequent _ncf Transpose (perm {0,2,1})
 * restores the layout to {1,4,32895}. */
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_2_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_2[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0[] = {1, 32895, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_text_encoder_attn_encoder_attn_layers_3_Pad_2", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_2, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0" }; uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf[] = {1, 4, 32895}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_10(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Reshape_10 */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_10[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_10_output_0[] = {1, 4, 129, 255}; Qnn_Tensor_t 
/* Reshape_10: {1,4,32895} -> {1,4,129,255} (129*255 = 32895). Followed by Slice_3, a
 * StridedSlice with per-axis [begin, end, stride] ranges {0,1,1}/{0,4,1}/{0,128,1}/{127,255,1}
 * (4x3 int32 ranges tensor, 12 elements -> dataSize=48 bytes; all masks 0), yielding _v_905
 * of shape {1,4,128,128}. NOTE(review): this completes the pad->reshape->slice "skew" pattern
 * started at Pad_1 — presumably aligning relative-position scores; confirm against the ONNX
 * source graph. */
outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Reshape_10", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_10, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_10, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode_Slice_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR Slice_3 */ uint32_t dimensions_Slice_3_ranges[] = {4, 3}; int32_t Slice_3_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 127, 255, 1}; Qnn_Param_t params_Slice_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "Slice_3_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, 
.dimensions=dimensions_Slice_3_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)Slice_3_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs_Slice_3[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_10_output_0" }; uint32_t dimensions__v_905[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs_Slice_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_v_905", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0409761816263199f, .offset= -94}}}, .rank= 4, .dimensions=dimensions__v_905, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "Slice_3", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params_Slice_3, // Node Params 5, // Num Node Params inputs_Slice_3, // Input Tensor Names 1, // Num Input Tensor Names outputs_Slice_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
/* Add_2: ElementWiseBinary with operation=0 adds the raw attention scores
 * (_..._MatMul_output_0) to the sliced relative term _v_905, both {1,4,128,128};
 * output re-quantized to scale 0.04324/offset -76. */
ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Add_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Add_2 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Add_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Add_2[] = { "_text_encoder_attn_encoder_attn_layers_3_MatMul_output_0", "_v_905" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Add_2_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Add_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Add_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0432433895766735f, .offset= -76}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Add_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Add_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Add_2, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Add_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Add_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* Where: ElementWiseSelect(condition=Cast_5 output, x=layers_0 constant, y=Add_2 output).
 * The large output scale (39.246) with offset -255 suggests the constant branch carries a
 * large-negative masking value (softmax mask) — presumably; verify against the constant's data. */
addNode__text_encoder_attn_encoder_attn_layers_3_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Where */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_Where[] = { "_text_encoder_attn_encoder_attn_layers_3_Cast_5_output_0", "_text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", "_text_encoder_attn_encoder_attn_layers_3_Add_2_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Where_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 39.2459793090820312f, .offset= -255}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
/* Softmax over the last axis (axis=3, beta=1.0) of the masked {1,4,128,128} scores.
 * Output quantization scale 1/256 with offset 0 — the natural encoding for values in [0,1). */
_text_encoder_attn_encoder_attn_layers_3_Softmax */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Softmax[] = { "_text_encoder_attn_encoder_attn_layers_3_Where_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0[] = {1, 4, 128, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Softmax, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* Softmax_output_0_nhwc: Transpose {1,4,128,128} NCHW -> {1,128,128,4} NHWC with
 * perm = {0,2,3,1} (4 uint32 elements -> dataSize=16). Feeds the second skew sequence
 * (Pad_3/Reshape_13) below. */
addNode__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc[] = {1, 128, 128, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
/* MatMul_2 (below): attention probabilities {1,4,128,128} x value tensor
 * (_..._Transpose_1_output_0) -> {1,4,128,64}; transpose_in0/in1 both false. */
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_MatMul_2 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_MatMul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_2[] = { "_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0", "_text_encoder_attn_encoder_attn_layers_3_Transpose_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_2_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_2[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_MatMul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0071464367210865f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_MatMul_2", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_MatMul_2, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_3 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_pad_amount[] = {4, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_3_pad_amount[] = {0, 0, 0, 0, 0, 127, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_3_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, 
/* Pad_3: constant-pads axis 2 of the NHWC softmax output {1,128,128,4} by 127 trailing
 * elements -> {1,128,255,4} (4x2 pad_amount, 8 uint32 elements -> dataSize=32 bytes;
 * 128 + 127 = 255). Second skew sequence, applied to the attention probabilities. */
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_3_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_3[] = { "_text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0[] = {1, 128, 255, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Pad_3", // Node Name 
/* Pad_3_output_0_nchw (below): Transpose {1,128,255,4} NHWC -> {1,4,128,255} NCHW,
 * perm = {0,3,1,2}. */
"qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_3, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw[] = {1, 4, 128, 255}; 
Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_13(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Reshape_13 */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0[] = {1, 4, 32640}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Reshape_13", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc[] = {1, 32640, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc, // Node Params 1, // Num Node Params 
inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_4 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_pad_amount[] = {3, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_4_pad_amount[] = {0, 0, 128, 0, 0, 0}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_4[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_4_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_4_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_4[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc" }; uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0[] = {1, 32768, 4}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Pad_4", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_4, // Node Params 3, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { 
.id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf[] = {1, 4, 32768}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf", // Node Name "qti.aisw", // Package Name 
"Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_16(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Reshape_16 */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_16[] = { "_text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_16_output_0[] = {1, 4, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_16[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Reshape_16_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Reshape_16_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Reshape_16", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_16, // Input Tensor Names 1, // Num Input Tensor 
Names outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_16, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Slice_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Slice_8 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Slice_8_ranges[] = {4, 3}; int32_t _text_encoder_attn_encoder_attn_layers_3_Slice_8_ranges[] = {0, 1, 1, 0, 4, 1, 0, 128, 1, 1, 256, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Slice_8[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Slice_8_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Slice_8_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Slice_8_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* 
inputs__text_encoder_attn_encoder_attn_layers_3_Slice_8[] = { "_text_encoder_attn_encoder_attn_layers_3_Reshape_16_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Slice_8_output_0[] = {1, 4, 128, 255}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Slice_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Slice_8_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Slice_8_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Slice_8", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Slice_8, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Slice_8, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Slice_8, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw[] = {1, 1, 255, 64}; VALIDATE(model.addTensor("_text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052294163033366f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw), .dataSize=BINLEN(_text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_MatMul_3 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_MatMul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_3[] = { "_text_encoder_attn_encoder_attn_layers_3_Slice_8_output_0", "_text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_3_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_MatMul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027310703881085f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_MatMul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_MatMul_3", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_MatMul_3, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_MatMul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_MatMul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Add_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Add_4 */ Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Add_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Add_4[] = { "_text_encoder_attn_encoder_attn_layers_3_MatMul_2_output_0", "_text_encoder_attn_encoder_attn_layers_3_MatMul_3_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Add_4_output_0[] = {1, 4, 128, 64}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Add_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Add_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061508808284998f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Add_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Add_4", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Add_4, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Add_4, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Add_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Transpose_9(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Transpose_9 */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_9_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_Transpose_9_perm[] = {0, 1, 3, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_Transpose_9[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Transpose_9_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_9_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_Transpose_9_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_Transpose_9[] = { "_text_encoder_attn_encoder_attn_layers_3_Add_4_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_9_output_0[] = {1, 4, 64, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Transpose_9[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_Transpose_9_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061508808284998f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_Transpose_9_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Transpose_9", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_Transpose_9, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Transpose_9, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Transpose_9, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_19(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_Reshape_19 */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_19[] = { "_text_encoder_attn_encoder_attn_layers_3_Transpose_9_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_19[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061508808284998f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_Reshape_19", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_Reshape_19, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_Reshape_19, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061508808284998f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight[] = {1, 1, 256, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0064112017862499f, .offset= -139}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010849093087018f, .offset= -149}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_dilation[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_dilation[] = {1, 1}; uint32_t 
dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight", "tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0158431679010391f, .offset= -136}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d, // Node Params 5, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw_perm", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0158431679010391f, .offset= -136}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw", // Node Name "qti.aisw", // 
Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate */ const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0158431679010391f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node 
Params 0, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc[] = { "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc[] = {1, 128, 256}; Qnn_Tensor_t 
outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0158431679010391f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_Add_6(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_Add_6 */ Qnn_Param_t params__text_encoder_attn_encoder_Add_6[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_Add_6[] = { "_text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0", "_text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc" }; uint32_t 
dimensions__text_encoder_attn_encoder_norm_layers_1_3_Transpose_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_3_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0185408927500248f, .offset= -152}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_3_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_Add_6", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_Add_6, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_Add_6, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_Add_6, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021504836622626f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023441885132343f, .offset= -122}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization */ uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization_axes[] = {1}; uint32_t _text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization[] = { "_text_encoder_attn_encoder_norm_layers_1_3_Transpose_output_0", "tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight", "tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias" }; uint32_t dimensions__text_encoder_attn_encoder_norm_layers_1_3_Transpose_1_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_text_encoder_attn_encoder_norm_layers_1_3_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074446341022849f, .offset= -160}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_1_3_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_Mul */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_Mul[] = { "_text_encoder_attn_encoder_norm_layers_1_3_Transpose_1_output_0", "text_mask" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_Mul[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066889766603708f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_3_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_3_Mul, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_3_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_3_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= 
{ QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf[] = { "_text_encoder_attn_encoder_ffn_layers_3_Mul_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf[] = {1, 256, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066889766603708f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf, // Input Tensor Names 1, // Num Input 
Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d[] = {1, 256, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066889766603708f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } 
/* (review) Transpose node: perm {0,2,3,1} converts {1,256,1,128} (channel-first) to NHWC
   {1,1,128,256} ahead of conv_1's Conv2d. Perm data is embedded as a static UINT_32 tensor
   param (4 elements, dataSize 16 bytes); activation quant encoding is carried through. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066889766603708f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) Static tensor: conv_1 weights, uint8 asymmetric per-tensor quant
   (scale 0.0113202538341284, offset -131), shape {1,1,256,1024}; payload is linked in
   from the converter's binary section via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight[] = {1, 1, 256, 1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0113202538341284f, .offset= -131}}}, .rank= 4, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }

/* (review) Static tensor: conv_1 bias, uint8 quant (scale 0.0021227377001196, offset -219),
   shape {1024}; payload from the linked binary blob. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021227377001196f, .offset= -219}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }

/* (review) Conv2d node for conv_1 (1x1 kernel, stride {1,1}, no padding, dilation {1,1},
   group 1): NHWC activation {1,1,128,256} x weights {1,1,256,1024} + bias {1024}
   -> {1,1,128,1024}. Definition continues below (declaration is split across chunks). */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_dilation[]
= {2}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_stride[] = {2}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_stride[] = {1, 1}; /* (review) Conv2d tensor params: dilation {1,1}, pad_amount {0,0,0,0} ({2,2}-shaped), stride {1,1};
   plus scalar params group=1 and reuse_sparse_indices=false. */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc", "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight", "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate[] = {1, 1, 128, 1024}; /* (review) Conv output: NHWC {1,1,128,1024}, uint8 (scale 0.0182306878268719, offset -231). */ Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0182306878268719f, .offset= -231}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d, // Node Params
5, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) Transpose node: perm {0,3,1,2} returns the conv output from NHWC {1,1,128,1024}
   to channel-first {1,1024,1,128}; definition continues below. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw_perm", .type=
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0182306878268719f, .offset= -231}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) Reshape node: drops the singleton height axis, {1,1024,1,128} -> rank-3
   {1,1024,128}, producing the ONNX-named conv_1 output "_..._conv_1_Conv_output_0". */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate */ const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0182306878268719f, .offset= -231}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node
Params
inputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) ElementWiseNeuron node on the conv_1 output; scalar param operation=4 —
   presumably the ReLU opcode given the node name (confirm against QnnOpDef.h). Output
   encoding has offset 0, consistent with a non-negative activation. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_Relu(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_Relu */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_Relu[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_Relu[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_Relu[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Relu_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017100907862186f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_Relu", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__text_encoder_attn_encoder_ffn_layers_3_Relu, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_Relu, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_Relu, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) Transpose node: perm {0,2,1}, {1,1024,128} -> {1,128,1024}, putting features last
   so the masked multiply below can broadcast against text_mask. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc_perm[] = {3}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc[] = { "_text_encoder_attn_encoder_ffn_layers_3_Relu_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc[] = {1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017100907862186f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) ElementWiseBinary node combining the ReLU activation {1,128,1024} with graph
   input "text_mask" {1,1,128 per converter command line}; scalar operation=13 — presumably
   multiply given the ONNX node name Mul_1 (confirm against QnnOpDef.h). Output keeps the
   input's scale/offset, consistent with a 0/1 mask multiply. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_Mul_1 */ Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1[] = { "_text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc", "text_mask" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0[] = {1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017100907862186f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__text_encoder_attn_encoder_ffn_layers_3_Mul_1, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) Transpose node: perm {0,2,1}, {1,128,1024} -> channel-first {1,1024,128};
   definition continues below. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf_perm[] = {3}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf[] = { "_text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf[] = {1, 1024, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017100907862186f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }

/* (review) Reshape node: lifts the masked activation {1,1024,128} to rank-4 {1,1024,1,128}
   for the conv_2 pipeline; quant encoding carried through. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d */ const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d[] = { "_text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d[] = {1, 1024, 1, 128}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017100907862186f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
),
err); return err; }

/* (review) Transpose node: perm {0,2,3,1}, {1,1024,1,128} -> NHWC {1,1,128,1024} ahead of
   conv_2's Conv2d; definition continues below. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d" }; uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc[] = {1, 1, 128, 1024}; Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017100907862186f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight[] = {1, 1, 1024, 256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0177959650754929f, .offset= -112}}}, .rank= 4, 
.dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009996886365116f, .offset= -143}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d */ uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_dilation[] = 
  {2};
  /* dilation = {1,1}: no dilation */
  uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_dilation[] = {1, 1};
  /* pad_amount = 2x2 zeros: no spatial padding */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  /* stride = {1,1} */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_stride[] = {2};
  uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_stride[] = {1, 1};
  /* Conv2d params: three static uint32 param tensors (dilation/pad_amount/stride)
     plus scalar group=1 and reuse_sparse_indices=false. */
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  /* Inputs: NHWC-reshaped activation, static weight, static bias. */
  const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d[] = {
    "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc",
    "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight",
    "tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias"
  };
  /* Output activation (NHWC): 1x1x128x256. */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate[] = {1, 1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
      "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074010943062603f, .offset= -73}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "Conv2d", /* Qnn Node Type */
                         params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d, /* Node Params */
                         5, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d, /* Input Tensor Names */
                         3, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Transpose node: perm {0,3,1,2} takes the conv output from channel-last to
   channel-first layout ({1,1,128,256} -> {1,256,1,128}); tensor names mark
   this as the NHWC -> NCHW step. Quant params are carried through unchanged. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate" };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw[] = {1, 256, 1, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074010943062603f, .offset= -73}}}, .rank= 4, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "Transpose", /* Qnn Node Type */
                         params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw, /* Node Params */
                         1, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw, /* Input Tensor Names */
                         1, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Reshape node: drops the height-1 axis, {1,256,1,128} -> {1,256,128}
   (no params; pure metadata reshape). */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate */
  const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw" };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0[] = {1, 256, 128};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074010943062603f, .offset= -73}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "Reshape", /* Qnn Node Type */
                         nullptr, /* Node Params */
                         0, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate, /* Input Tensor Names */
                         1, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Transpose node: perm {0,2,1} swaps feature/time axes,
   {1,256,128} -> {1,128,256} ("nfc" = batch, frame, channel per the name). */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc */
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0" };
  uint32_t dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074010943062603f, .offset= -73}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "Transpose", /* Qnn Node Type */
                         params__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc, /* Node Params */
                         1, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc, /* Input Tensor Names */
                         1, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* ElementWiseBinary with operation code 13 (node name indicates elementwise
   multiply — masks the ffn output with "text_mask"). Continues on the next
   chunk line. */
static ModelError_t addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_ffn_layers_3_Mul_2 */
  Qnn_Param_t params__text_encoder_attn_encoder_ffn_layers_3_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_2[] = { "_text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc", "text_mask" };
  uint32_t
  dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_2_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_ffn_layers_3_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074010943062603f, .offset= -73}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_ffn_layers_3_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_ffn_layers_3_Mul_2", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "ElementWiseBinary", /* Qnn Node Type */
                         params__text_encoder_attn_encoder_ffn_layers_3_Mul_2, /* Node Params */
                         1, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_ffn_layers_3_Mul_2, /* Input Tensor Names */
                         2, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_ffn_layers_3_Mul_2, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* ElementWiseBinary with operation code 0 (node name indicates add —
   residual connection: norm_layers_1_3 output + masked ffn output). */
static ModelError_t addNode__text_encoder_attn_encoder_Add_7(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_Add_7 */
  Qnn_Param_t params__text_encoder_attn_encoder_Add_7[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__text_encoder_attn_encoder_Add_7[] = { "_text_encoder_attn_encoder_norm_layers_1_3_Transpose_1_output_0", "_text_encoder_attn_encoder_ffn_layers_3_Mul_2_output_0" };
  uint32_t
  dimensions__text_encoder_attn_encoder_norm_layers_2_3_Transpose_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_Add_7[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_3_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0111675439402461f, .offset= -107}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_3_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_Add_7", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "ElementWiseBinary", /* Qnn Node Type */
                         params__text_encoder_attn_encoder_Add_7, /* Node Params */
                         1, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_Add_7, /* Input Tensor Names */
                         2, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_Add_7, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Static LayerNorm gamma (scale) tensor for norm_layers_2_3: 256 x uint8,
   scale 0.0039423625, offset 0; payload from the model binary. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
      QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039423624984920f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* Static LayerNorm beta (bias) tensor for norm_layers_2_3: 256 x uint8,
   scale 0.0025068934, offset -102; payload from the model binary. */
static ModelError_t addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025068933609873f, .offset= -102}}}, .rank= 1, .dimensions=dimensions_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias), .dataSize=BINLEN(tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* LayerNorm over axis {2} (the 256-wide feature axis of {1,128,256}),
   epsilon 1e-6, with the weight/bias tensors above as inputs 2 and 3. */
static ModelError_t addNode__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization */
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization_axes[] = {1};
  uint32_t _text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization[] = { "_text_encoder_attn_encoder_norm_layers_2_3_Transpose_output_0", "tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight", "tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias" };
  uint32_t dimensions__text_encoder_attn_encoder_norm_layers_2_3_Transpose_1_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_attn_encoder_norm_layers_2_3_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0543869696557522f, .offset= -149}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_norm_layers_2_3_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "LayerNorm", /* Qnn Node Type */
                         params__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization, /* Node Params */
                         2, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization, /* Input Tensor Names */
                         3, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* ElementWiseBinary, operation code 13 (node name indicates multiply):
   masks the LayerNorm output with "text_mask". Continues on the next chunk line. */
static ModelError_t addNode__text_encoder_attn_encoder_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_attn_encoder_Mul_2 */
  Qnn_Param_t params__text_encoder_attn_encoder_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_attn_encoder_Mul_2[] = { "_text_encoder_attn_encoder_norm_layers_2_3_Transpose_1_output_0", "text_mask" };
  uint32_t dimensions__text_encoder_attn_encoder_Mul_2_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_attn_encoder_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
      "_text_encoder_attn_encoder_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0387258753180504f, .offset= -153}}}, .rank= 3, .dimensions=dimensions__text_encoder_attn_encoder_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_attn_encoder_Mul_2", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "ElementWiseBinary", /* Qnn Node Type */
                         params__text_encoder_attn_encoder_Mul_2, /* Node Params */
                         1, /* Num Node Params */
                         inputs__text_encoder_attn_encoder_Mul_2, /* Input Tensor Names */
                         2, /* Num Input Tensor Names */
                         outputs__text_encoder_attn_encoder_Mul_2, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* ElementWiseBinary, operation code 0 (node name indicates add): combines
   the masked attention-encoder output with the convnext branch output. */
static ModelError_t addNode__text_encoder_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_Add */
  Qnn_Param_t params__text_encoder_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__text_encoder_Add[] = { "_text_encoder_attn_encoder_Mul_2_output_0", "_text_encoder_convnext_convnext_5_Mul_3_output_0" };
  uint32_t dimensions__text_encoder_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_text_encoder_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0635695382952690f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__text_encoder_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_Add", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "ElementWiseBinary", /* Qnn Node Type */
                         params__text_encoder_Add, /* Node Params */
                         1, /* Num Node Params */
                         inputs__text_encoder_Add, /* Input Tensor Names */
                         2, /* Num Input Tensor Names */
                         outputs__text_encoder_Add, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* ElementWiseBinary, operation code 13 (node name indicates multiply):
   final mask of the encoder sum; output feeds the speech-prompted encoder
   (quant params identical to the input — a pass-through mask). Continues
   on the next chunk line. */
static ModelError_t addNode__text_encoder_proj_out_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _text_encoder_proj_out_Mul */
  Qnn_Param_t params__text_encoder_proj_out_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__text_encoder_proj_out_Mul[] = { "_text_encoder_Add_output_0", "text_mask" };
  uint32_t dimensions__speech_prompted_text_encoder_Transpose_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__text_encoder_proj_out_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0635695382952690f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
      .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_text_encoder_proj_out_Mul", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "ElementWiseBinary", /* Qnn Node Type */
                         params__text_encoder_proj_out_Mul, /* Node Params */
                         1, /* Num Node Params */
                         inputs__text_encoder_proj_out_Mul, /* Input Tensor Names */
                         2, /* Num Input Tensor Names */
                         outputs__text_encoder_proj_out_Mul, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Reshape node: flattens {1,128,256} -> {128,256} so the following
   FullyConnected (which implements the W_query MatMul + bias) can consume it. */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape */
  const char* inputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape[] = { "_speech_prompted_text_encoder_Transpose_output_0" };
  uint32_t dimensions__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape[] = {128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0635695382952690f, .offset= -139}}}, .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "Reshape", /* Qnn Node Type */
                         nullptr, /* Node Params */
                         0, /* Num Node Params */
                         inputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape, /* Input Tensor Names */
                         1, /* Num Input Tensor Names */
                         outputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Static 256x256 weight matrix for the W_query linear layer (ONNX-exported
   MatMul initializer); uint8, scale 0.0050055669, offset -139; payload from
   the model binary. */
static ModelError_t addTensor_onnx__MatMul_3678(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3678[] = {256, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3678", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3678", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050055668689311f, .offset= -139}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3678, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3678), .dataSize=BINLEN(onnx__MatMul_3678)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* Static 256-element bias for the W_query linear layer (uint8, scale
   0.0045613148, offset -117; payload from the model binary). Continues on
   the next chunk line. */
static ModelError_t addTensor_tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
      QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0045613148249686f, .offset= -117}}}, .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* FullyConnected node realizing the ONNX W_query MatMul + bias add:
   {128,256} activation x {256,256} weight + {256} bias -> {128,256}. */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_W_query_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_W_query_linear_MatMul */
  const char* inputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul[] = { "_speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape", "onnx__MatMul_3678", "tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias" };
  uint32_t dimensions__speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0_fc[] = {128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0964742600917816f, .offset= -157}}}, .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_speech_prompted_text_encoder_attention1_W_query_linear_MatMul", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "FullyConnected", /* Qnn Node Type */
                         nullptr, /* Node Params */
                         0, /* Num Node Params */
                         inputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul, /* Input Tensor Names */
                         3, /* Num Input Tensor Names */
                         outputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Reshape node: restores the batch axis, {128,256} -> {1,128,256}. */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape */
  const char* inputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape[] = { "_speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0_fc" };
  uint32_t dimensions__speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0964742600917816f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
      .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
                         "_speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape", /* Node Name */
                         "qti.aisw", /* Package Name */
                         "Reshape", /* Qnn Node Type */
                         nullptr, /* Node Params */
                         0, /* Num Node Params */
                         inputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape, /* Input Tensor Names */
                         1, /* Num Input Tensor Names */
                         outputs__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape, /* Output Tensors */
                         1 /* Num Output Tensors */
  ), err);
  return err;
}

/* Split node for attention1 with split_index {128}; the axis scalar param and
   the rest of the definition continue past this chunk (incomplete here). */
static ModelError_t addNode__speech_prompted_text_encoder_attention1_Split(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Split */
  uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_split_index[] = {1};
  uint32_t _speech_prompted_text_encoder_attention1_Split_split_index[] = {128};
  Qnn_Param_t params__speech_prompted_text_encoder_attention1_Split[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_speech_prompted_text_encoder_attention1_Split_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam=
(Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_Split[] = { "_speech_prompted_text_encoder_attention1_W_query_linear_Add_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_output_0[] = {1, 128, 128}; uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_output_1[] = {1, 128, 128}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Split[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0964742600917816f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0964742600917816f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t 
Version "_speech_prompted_text_encoder_attention1_Split", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__speech_prompted_text_encoder_attention1_Split, // Node Params 2, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Split, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Split, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Unsqueeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Unsqueeze */ const char* inputs__speech_prompted_text_encoder_attention1_Unsqueeze[] = { "_speech_prompted_text_encoder_attention1_Split_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_output_0[] = {1, 1, 128, 128}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Unsqueeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0964742600917816f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Unsqueeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__speech_prompted_text_encoder_attention1_Unsqueeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Unsqueeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Unsqueeze_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Unsqueeze_1 */ const char* inputs__speech_prompted_text_encoder_attention1_Unsqueeze_1[] = { "_speech_prompted_text_encoder_attention1_Split_output_1" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_1_output_0[] = {1, 1, 128, 128}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Unsqueeze_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0964742600917816f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Unsqueeze_1", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Unsqueeze_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Unsqueeze_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__speech_prompted_text_encoder_attention1_Concat(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Concat */ Qnn_Param_t params__speech_prompted_text_encoder_attention1_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_Concat[] = { "_speech_prompted_text_encoder_attention1_Unsqueeze_output_0", "_speech_prompted_text_encoder_attention1_Unsqueeze_1_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Concat_output_0[] = {2, 1, 128, 128}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0964742600917816f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__speech_prompted_text_encoder_attention1_Concat, // Node Params 1, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Concat, // Output Tensors 1// Num Output Tensors ), err); 
return err; } static ModelError_t addTensor__speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw[] = {2, 1, 128, 50}; VALIDATE(model.addTensor("_speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078430017456412f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw), .dataSize=BINLEN(_speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_MatMul */ Qnn_Param_t params__speech_prompted_text_encoder_attention1_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_MatMul[] = { "_speech_prompted_text_encoder_attention1_Concat_output_0", 
"_speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_MatMul_output_0[] = {2, 1, 128, 50}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 1.8921014070510864f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__speech_prompted_text_encoder_attention1_MatMul, // Node Params 2, // Num Node Params inputs__speech_prompted_text_encoder_attention1_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__speech_prompted_text_encoder_attention1_Constant_9_output_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__speech_prompted_text_encoder_attention1_Constant_9_output_0[] = {1}; VALIDATE(model.addTensor("_speech_prompted_text_encoder_attention1_Constant_9_output_0", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Constant_9_output_0", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0627451017498970f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Constant_9_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_speech_prompted_text_encoder_attention1_Constant_9_output_0), .dataSize=BINLEN(_speech_prompted_text_encoder_attention1_Constant_9_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Div(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Div */ Qnn_Param_t params__speech_prompted_text_encoder_attention1_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_Div[] = { "_speech_prompted_text_encoder_attention1_MatMul_output_0", "_speech_prompted_text_encoder_attention1_Constant_9_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Div_output_0[] = {2, 1, 128, 50}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1182563379406929f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__speech_prompted_text_encoder_attention1_Div, // Node Params 1, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Softmax */ Qnn_Param_t params__speech_prompted_text_encoder_attention1_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_Softmax[] = { "_speech_prompted_text_encoder_attention1_Div_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Softmax_output_0[] = {2, 1, 128, 50}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, 
.dimensions=dimensions__speech_prompted_text_encoder_attention1_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__speech_prompted_text_encoder_attention1_Softmax, // Node Params 2, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Where */ const char* inputs__speech_prompted_text_encoder_attention1_Where[] = { "_speech_prompted_text_encoder_attention2_Cast_output_0", "_speech_prompted_text_encoder_attention1_Constant_11_output_0", "_speech_prompted_text_encoder_attention1_Softmax_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Where_output_0[] = {2, 1, 128, 50}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009158538305201f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_MatMul_1 */ Qnn_Param_t params__speech_prompted_text_encoder_attention1_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_MatMul_1[] = { "_speech_prompted_text_encoder_attention1_Where_output_0", "_speech_prompted_text_encoder_attention1_Concat_2_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_MatMul_1_output_0[] = {2, 1, 128, 128}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010703827720135f, .offset= -131}}}, .rank= 4, 
.dimensions=dimensions__speech_prompted_text_encoder_attention1_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__speech_prompted_text_encoder_attention1_MatMul_1, // Node Params 2, // Num Node Params inputs__speech_prompted_text_encoder_attention1_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Split_3 */ uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_3_split_index[] = {1}; uint32_t _speech_prompted_text_encoder_attention1_Split_3_split_index[] = {1}; Qnn_Param_t params__speech_prompted_text_encoder_attention1_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_speech_prompted_text_encoder_attention1_Split_3_split_index, .dataSize=4}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_Split_3[] = { "_speech_prompted_text_encoder_attention1_MatMul_1_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_3_output_0[] = {1, 1, 128, 128}; uint32_t dimensions__speech_prompted_text_encoder_attention1_Split_3_output_1[] = {1, 1, 128, 128}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010703827720135f, .offset= -131}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010703827720135f, .offset= -131}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, 
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Split_3", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__speech_prompted_text_encoder_attention1_Split_3, // Node Params 2, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Split_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Split_3, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Concat_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Concat_3 */ Qnn_Param_t params__speech_prompted_text_encoder_attention1_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__speech_prompted_text_encoder_attention1_Concat_3[] = { "_speech_prompted_text_encoder_attention1_Split_3_output_0", "_speech_prompted_text_encoder_attention1_Split_3_output_1" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_Concat_3_output_0[] = {1, 1, 128, 256}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Concat_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010703827720135f, .offset= -131}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Concat_3", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__speech_prompted_text_encoder_attention1_Concat_3, // Node Params 1, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Concat_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Concat_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention1_Squeeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Squeeze */ const char* inputs__speech_prompted_text_encoder_attention1_Squeeze[] = { "_speech_prompted_text_encoder_attention1_Concat_3_output_0" }; uint32_t dimensions__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010703827720135f, .offset= -131}}}, .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, 
.isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention1_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__speech_prompted_text_encoder_attention1_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention1_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3681(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3681[] = {256, 256}; VALIDATE(model.addTensor("onnx__MatMul_3681", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3681", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0056947390548885f, .offset= -125}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3681, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3681), .dataSize=BINLEN(onnx__MatMul_3681)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0029470019508153f, .offset= -144}}},
                               .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}
// Adds attention1's out_fc linear layer as a FullyConnected node:
// {128,256} activation x onnx__MatMul_3681 weights + bias -> {128,256} "_fc" output.
static ModelError_t addNode__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_out_fc_linear_MatMul */
  const char* inputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul[] = {
    "_speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_pre_reshape",
    "onnx__MatMul_3681",
    "tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0_fc[] = {128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0_fc",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0101993391290307f, .offset= -137}}},
        .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0_fc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention1_out_fc_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Reshape node restoring the FullyConnected result back to rank-3 {1, 128, 256};
// quantization encoding is carried over unchanged from the "_fc" tensor.
static ModelError_t addNode__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape */
  const char* inputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape[] = {
    "_speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0_fc"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0101993391290307f, .offset= -137}}},
        .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// ElementWiseBinary node (operation code 13; the ONNX node name indicates a multiply)
// applying graph input "text_mask" to attention1's output. Output keeps the same
// quantization encoding as its first input.
static ModelError_t addNode__speech_prompted_text_encoder_attention1_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention1_Mul */
  Qnn_Param_t params__speech_prompted_text_encoder_attention1_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__speech_prompted_text_encoder_attention1_Mul[] = {
    "_speech_prompted_text_encoder_attention1_out_fc_linear_Add_output_0",
    "text_mask"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention1_Mul_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention1_Mul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention1_Mul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0101993391290307f, .offset= -137}}},
        .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention1_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention1_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__speech_prompted_text_encoder_attention1_Mul, // Node Params
                         1, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention1_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention1_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// ElementWiseBinary node (operation code 0; ONNX node name indicates an add)
// combining the masked attention1 output with "_speech_prompted_text_encoder_Transpose_output_0".
static ModelError_t addNode__speech_prompted_text_encoder_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_Add */
  Qnn_Param_t params__speech_prompted_text_encoder_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__speech_prompted_text_encoder_Add[] = {
    "_speech_prompted_text_encoder_attention1_Mul_output_0",
    "_speech_prompted_text_encoder_Transpose_output_0"
  };
  uint32_t dimensions__speech_prompted_text_encoder_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_Add[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0650492459535599f, .offset= -142}}},
        .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__speech_prompted_text_encoder_Add, // Node Params
                         1, // Num Node Params
                         inputs__speech_prompted_text_encoder_Add, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_Add, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Reshape node flattening the encoder Add output to rank-2 {128, 256} ahead of
// attention2's W_query FullyConnected.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape */
  const char* inputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape[] = {
    "_speech_prompted_text_encoder_Add_output_0"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape[] = {128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0650492459535599f, .offset= -142}}},
        .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Registers the static 256x256 weight tensor "onnx__MatMul_3682" for attention2's
// W_query linear layer (uint8 quantized, data from the model binary).
static ModelError_t addTensor_onnx__MatMul_3682(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3682[] = {256, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3682", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0, .name= "onnx__MatMul_3682",
                               .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0062970272265375f, .offset= -118}}},
                               .rank= 2, .dimensions=dimensions_onnx__MatMul_3682,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3682), .dataSize=BINLEN(onnx__MatMul_3682)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}
// Registers the static 256-element bias tensor for attention2's W_query linear layer.
static ModelError_t addTensor_tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0, .name= "tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias",
                               .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding=
{.scale= 0.0035397338215262f, .offset= -119}}},
                               .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}
// Adds attention2's W_query linear layer as a FullyConnected node:
// {128,256} activation x onnx__MatMul_3682 weights + bias -> {128,256} "_fc" output.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_W_query_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_W_query_linear_MatMul */
  const char* inputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul[] = {
    "_speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape",
    "onnx__MatMul_3682",
    "tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0_fc[] = {128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0_fc",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1243563517928123f, .offset= -107}}},
        .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0_fc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced=
0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_W_query_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Reshape node restoring the W_query FullyConnected result to rank-3 {1, 128, 256}.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape */
  const char* inputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape[] = {
    "_speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0_fc"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1243563517928123f, .offset= -107}}},
        .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Split node: splits the {1,128,256} W_query output along axis 2 at index 128
// (split_index is passed as a 1-element uint32 static tensor param, hence dataSize=4).
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Split(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Split */
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_split_index[] = {1};
  uint32_t _speech_prompted_text_encoder_attention2_Split_split_index[] = {128};
  Qnn_Param_t params__speech_prompted_text_encoder_attention2_Split[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="split_index",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_split_index",
         .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_split_index,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_speech_prompted_text_encoder_attention2_Split_split_index, .dataSize=4}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
  };
  const char* inputs__speech_prompted_text_encoder_attention2_Split[] = {
"_speech_prompted_text_encoder_attention2_W_query_linear_Add_output_0" };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_output_0[] = {1, 128, 128};
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_output_1[] = {1, 128, 128};
  // Two {1,128,128} halves; both inherit the W_query output's quantization encoding.
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Split[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1243563517928123f, .offset= -107}}},
        .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}},
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_output_1",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1243563517928123f, .offset= -107}}},
        .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_output_1,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_Split", // Node Name
                         "qti.aisw", // Package Name
                         "Split", // Qnn Node Type
                         params__speech_prompted_text_encoder_attention2_Split, // Node Params
                         2, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_Split, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_Split, // Output Tensors
                         2// Num Output Tensors
  ), err);
  return err;
}
// Reshape node implementing the ONNX Unsqueeze: {1,128,128} -> {1,1,128,128} (rank 4).
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Unsqueeze(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Unsqueeze */
  const char* inputs__speech_prompted_text_encoder_attention2_Unsqueeze[] = {
    "_speech_prompted_text_encoder_attention2_Split_output_0"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_output_0[] = {1, 1, 128, 128};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Unsqueeze[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Unsqueeze_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1243563517928123f, .offset= -107}}},
        .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_Unsqueeze", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_Unsqueeze, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_Unsqueeze, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Same Unsqueeze-as-Reshape for the second Split half: {1,128,128} -> {1,1,128,128}.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Unsqueeze_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Unsqueeze_1 */
  const char* inputs__speech_prompted_text_encoder_attention2_Unsqueeze_1[] = {
    "_speech_prompted_text_encoder_attention2_Split_output_1"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_1_output_0[] = {1, 1, 128, 128};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Unsqueeze_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Unsqueeze_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1243563517928123f, .offset= -107}}},
        .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Unsqueeze_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_Unsqueeze_1", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_Unsqueeze_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_Unsqueeze_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Concat of the two unsqueezed halves along axis 0 -> {2,1,128,128}.
// NOTE: the generator's block comment below is completed on the next source chunk line.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Concat(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR
_speech_prompted_text_encoder_attention2_Concat */
  Qnn_Param_t params__speech_prompted_text_encoder_attention2_Concat[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__speech_prompted_text_encoder_attention2_Concat[] = {
    "_speech_prompted_text_encoder_attention2_Unsqueeze_output_0",
    "_speech_prompted_text_encoder_attention2_Unsqueeze_1_output_0"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Concat_output_0[] = {2, 1, 128, 128};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Concat[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Concat_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1243563517928123f, .offset= -107}}},
        .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Concat_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_Concat", // Node Name
                         "qti.aisw", // Package Name
                         "Concat", // Qnn Node Type
                         params__speech_prompted_text_encoder_attention2_Concat, // Node Params
                         1, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_Concat, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_Concat, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Registers a constant-folded {2,1,128,50} activation tensor (the converter baked the
// attention2 tanh output into a STATIC tensor whose data comes from the model binary).
static ModelError_t addTensor__speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw(QnnModel& model){
  ModelError_t err
= MODEL_NO_ERROR;
  uint32_t dimensions__speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw[] = {2, 1, 128, 50};
  VALIDATE(model.addTensor("_speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0, .name= "_speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw",
                               .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0078412294387817f, .offset= -128}}},
                               .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(_speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw), .dataSize=BINLEN(_speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}
// MatMul node (no transposes): {2,1,128,128} Concat output x {2,1,128,50} static tanh
// tensor -> {2,1,128,50} attention scores.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_MatMul */
  Qnn_Param_t params__speech_prompted_text_encoder_attention2_MatMul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="transpose_in0",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="transpose_in1",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__speech_prompted_text_encoder_attention2_MatMul[] = {
    "_speech_prompted_text_encoder_attention2_Concat_output_0",
    "_speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_MatMul_output_0[] = {2, 1, 128, 50};
  Qnn_Tensor_t
outputs__speech_prompted_text_encoder_attention2_MatMul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_MatMul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 2.7499637603759766f, .offset= -106}}},
        .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_MatMul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "MatMul", // Qnn Node Type
                         params__speech_prompted_text_encoder_attention2_MatMul, // Node Params
                         2, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_MatMul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_MatMul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// ElementWiseBinary node (operation code 2; ONNX node name indicates a divide) scaling
// the raw attention scores by constant "_speech_prompted_text_encoder_attention1_Constant_9_output_0".
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Div(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Div */
  Qnn_Param_t params__speech_prompted_text_encoder_attention2_Div[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
  };
  const char* inputs__speech_prompted_text_encoder_attention2_Div[] = {
    "_speech_prompted_text_encoder_attention2_MatMul_output_0",
    "_speech_prompted_text_encoder_attention1_Constant_9_output_0"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Div_output_0[] = {2, 1, 128, 50};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Div[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Div_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1718727350234985f, .offset= -106}}},
        .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Div_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_Div", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__speech_prompted_text_encoder_attention2_Div, // Node Params
                         1, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_Div, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_Div, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Softmax over the last axis (axis=3, beta=1.0). Output uses the fixed softmax
// encoding scale 1/256 with offset 0 (values in [0, ~1)).
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Softmax(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Softmax */
  Qnn_Param_t params__speech_prompted_text_encoder_attention2_Softmax[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="beta",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}}
  };
  const char* inputs__speech_prompted_text_encoder_attention2_Softmax[] = {
    "_speech_prompted_text_encoder_attention2_Div_output_0"
  };
  uint32_t
dimensions__speech_prompted_text_encoder_attention2_Softmax_output_0[] = {2, 1, 128, 50};
  Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Softmax[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Softmax_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}},
        .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Softmax_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_Softmax", // Node Name
                         "qti.aisw", // Package Name
                         "Softmax", // Qnn Node Type
                         params__speech_prompted_text_encoder_attention2_Softmax, // Node Params
                         2, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_Softmax, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_Softmax, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// ElementWiseSelect (ONNX Where): selects between a constant and the softmax output
// based on the boolean-ish Cast tensor — i.e. masks attention weights.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Where(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Where */
  const char* inputs__speech_prompted_text_encoder_attention2_Where[] = {
    "_speech_prompted_text_encoder_attention2_Cast_output_0",
    "_speech_prompted_text_encoder_attention1_Constant_11_output_0",
    "_speech_prompted_text_encoder_attention2_Softmax_output_0"
  };
  uint32_t dimensions__speech_prompted_text_encoder_attention2_Where_output_0[] = {2, 1, 128, 50};
  Qnn_Tensor_t
outputs__speech_prompted_text_encoder_attention2_Where[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_speech_prompted_text_encoder_attention2_Where_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0035826915409416f, .offset= 0}}},
        .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Where_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_speech_prompted_text_encoder_attention2_Where", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseSelect", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__speech_prompted_text_encoder_attention2_Where, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__speech_prompted_text_encoder_attention2_Where, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Head of the next (partially visible here) function: attention-weighted value MatMul.
static ModelError_t addNode__speech_prompted_text_encoder_attention2_MatMul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_MatMul_1 */
  Qnn_Param_t params__speech_prompted_text_encoder_attention2_MatMul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="transpose_in0",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="transpose_in1",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__speech_prompted_text_encoder_attention2_MatMul_1[] = {
    "_speech_prompted_text_encoder_attention2_Where_output_0",
    "_speech_prompted_text_encoder_attention2_Concat_2_output_0"
  };
uint32_t dimensions__speech_prompted_text_encoder_attention2_MatMul_1_output_0[] = {2, 1, 128, 128}; Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050699342973530f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_speech_prompted_text_encoder_attention2_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__speech_prompted_text_encoder_attention2_MatMul_1, // Node Params 2, // Num Node Params inputs__speech_prompted_text_encoder_attention2_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__speech_prompted_text_encoder_attention2_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__speech_prompted_text_encoder_attention2_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Split_3 */ uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_3_split_index[] = {1}; uint32_t _speech_prompted_text_encoder_attention2_Split_3_split_index[] = {1}; Qnn_Param_t params__speech_prompted_text_encoder_attention2_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= 
/* Continuation of addNode__…attention2_Split_3: static UINT_32 split_index
 * tensor ({1}, 4 bytes) and scalar axis=0 — splits the {2,1,128,128} MatMul
 * output into two {1,1,128,128} halves that inherit the producer's
 * quantization (scale/offset unchanged). */
{ .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_speech_prompted_text_encoder_attention2_Split_3_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__speech_prompted_text_encoder_attention2_Split_3[] = { "_speech_prompted_text_encoder_attention2_MatMul_1_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_3_output_0[] = {1, 1, 128, 128};
uint32_t dimensions__speech_prompted_text_encoder_attention2_Split_3_output_1[] = {1, 1, 128, 128};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050699342973530f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050699342973530f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_attention2_Split_3", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__speech_prompted_text_encoder_attention2_Split_3, // Node Params
2, // Num Node Params
inputs__speech_prompted_text_encoder_attention2_Split_3, // Input Tensor Names
1, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_attention2_Split_3, // Output Tensors
2// Num Output Tensors
), err);
return err;
}

/* Re-joins the two split halves along the last axis (axis=3), producing
 * {1,1,128,256} — effectively folding the leading pair dimension into the
 * feature dimension. */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Concat_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Concat_3 */
Qnn_Param_t params__speech_prompted_text_encoder_attention2_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__speech_prompted_text_encoder_attention2_Concat_3[] = { "_speech_prompted_text_encoder_attention2_Split_3_output_0", "_speech_prompted_text_encoder_attention2_Split_3_output_1" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_Concat_3_output_0[] = {1, 1, 128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Concat_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050699342973530f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_attention2_Concat_3", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__speech_prompted_text_encoder_attention2_Concat_3, // Node Params
1, // Num Node Params
inputs__speech_prompted_text_encoder_attention2_Concat_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_attention2_Concat_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* ONNX Squeeze lowered to Reshape: {1,1,128,256} -> {128,256} so the result
 * can feed the FullyConnected output projection defined below. */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Squeeze(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Squeeze */
const char* inputs__speech_prompted_text_encoder_attention2_Squeeze[] = { "_speech_prompted_text_encoder_attention2_Concat_3_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_pre_reshape[] = {128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050699342973530f, .offset= -145}}}, .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_attention2_Squeeze", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__speech_prompted_text_encoder_attention2_Squeeze, // Input Tensor Names
1, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_attention2_Squeeze, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Static {256,256} u8 weight matrix for the attention2 output projection;
 * bytes live in the companion binary blob (BINVARSTART/BINLEN). The
 * registration call completes in the next source segment. */
static ModelError_t addTensor_onnx__MatMul_3685(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_onnx__MatMul_3685[] = {256, 256};
VALIDATE(model.addTensor("onnx__MatMul_3685", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3685", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0069792815484107f, .offset= -133}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3685, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3685), .dataSize=BINLEN(onnx__MatMul_3685)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ),
err);
return err;
}

/* Static {256} u8 bias for the attention2 output projection (blob-backed). */
static ModelError_t addTensor_tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017634298419580f, .offset= -114}}}, .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ),
err);
return err;
}

/* Output projection lowered to a single FullyConnected node — inputs are
 * (activations {128,256}, the static weight onnx__MatMul_3685, the static
 * bias above); output is {128,256}. Naming suggests the original ONNX
 * MatMul+Add pair was folded here — confirm against the exporter if relevant. */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_out_fc_linear_MatMul */
const char* inputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul[] = { "_speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3685", "tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0_fc[] = {128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144438156858087f, .offset= -127}}}, .rank= 2, .dimensions=dimensions__speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_attention2_out_fc_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Restores the batch dimension dropped for the FC: {128,256} -> {1,128,256};
 * quantization (scale/offset) is carried over from the FC output unchanged. */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape */
const char* inputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape[] = { "_speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0_fc" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144438156858087f, .offset= -127}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Masks the projected output with the graph input "text_mask".
 * ElementWiseBinary operation=13 — the node name indicates elementwise
 * multiply; see the operation enum in QnnOpDef.h. */
static ModelError_t addNode__speech_prompted_text_encoder_attention2_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_attention2_Mul */
Qnn_Param_t params__speech_prompted_text_encoder_attention2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__speech_prompted_text_encoder_attention2_Mul[] = { "_speech_prompted_text_encoder_attention2_out_fc_linear_Add_output_0", "text_mask" };
uint32_t dimensions__speech_prompted_text_encoder_attention2_Mul_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_attention2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_attention2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144438156858087f, .offset= -127}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_attention2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_attention2_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__speech_prompted_text_encoder_attention2_Mul, // Node Params
1, // Num Node Params
inputs__speech_prompted_text_encoder_attention2_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_attention2_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Residual combine of the masked attention output with the Transpose branch
 * (operation=0; node name indicates elementwise add — see QnnOpDef.h).
 * The output tensor definition continues in the next source segment. */
static ModelError_t addNode__speech_prompted_text_encoder_Add_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_Add_1 */
Qnn_Param_t params__speech_prompted_text_encoder_Add_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__speech_prompted_text_encoder_Add_1[] = { "_speech_prompted_text_encoder_attention2_Mul_output_0", "_speech_prompted_text_encoder_Transpose_output_0" };
uint32_t dimensions__speech_prompted_text_encoder_Add_1_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_Add_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_Add_1_output_0", .type=
/* Continuation of addNode__speech_prompted_text_encoder_Add_1: output tensor
 * tail and ElementWiseBinary node registration. */
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0621990934014320f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_Add_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_Add_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__speech_prompted_text_encoder_Add_1, // Node Params
1, // Num Node Params
inputs__speech_prompted_text_encoder_Add_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_Add_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Static {256} u8 LayerNorm scale (gamma) tensor, blob-backed. */
static ModelError_t addTensor_tts_ttl_speech_prompted_text_encoder_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_norm_norm_weight[] = {256};
VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_speech_prompted_text_encoder_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0067348456941545f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_norm_norm_weight), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ),
err);
return err;
}

/* Static {256} u8 LayerNorm shift (beta) tensor, blob-backed. */
static ModelError_t addTensor_tts_ttl_speech_prompted_text_encoder_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_speech_prompted_text_encoder_norm_norm_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_speech_prompted_text_encoder_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_speech_prompted_text_encoder_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061173439025879f, .offset= -185}}}, .rank= 1, .dimensions=dimensions_tts_ttl_speech_prompted_text_encoder_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_speech_prompted_text_encoder_norm_norm_bias), .dataSize=BINLEN(tts_ttl_speech_prompted_text_encoder_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ),
err);
return err;
}

/* Final LayerNorm: static axes tensor holds {2}, i.e. the 256-wide feature
 * axis of the {1,128,256} input; epsilon=1e-6; gamma/beta are the two static
 * tensors registered above. */
static ModelError_t addNode__speech_prompted_text_encoder_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_norm_norm_LayerNormalization */
uint32_t dimensions__speech_prompted_text_encoder_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _speech_prompted_text_encoder_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__speech_prompted_text_encoder_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__speech_prompted_text_encoder_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_speech_prompted_text_encoder_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__speech_prompted_text_encoder_norm_norm_LayerNormalization[] = { "_speech_prompted_text_encoder_Add_1_output_0", "tts_ttl_speech_prompted_text_encoder_norm_norm_weight", "tts_ttl_speech_prompted_text_encoder_norm_norm_bias" };
uint32_t dimensions__speech_prompted_text_encoder_norm_Transpose_output_0[] = {1, 128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_speech_prompted_text_encoder_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0411724708974361f, .offset= -159}}}, .rank= 3, .dimensions=dimensions__speech_prompted_text_encoder_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__speech_prompted_text_encoder_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__speech_prompted_text_encoder_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__speech_prompted_text_encoder_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Final mask multiply (operation=13; node name indicates multiply) producing
 * the graph output "text_emb" as an APP_READ tensor {1,128,256}. The node
 * registration call continues in the next source segment. */
static ModelError_t addNode__speech_prompted_text_encoder_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _speech_prompted_text_encoder_Mul */
Qnn_Param_t params__speech_prompted_text_encoder_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__speech_prompted_text_encoder_Mul[] = { "_speech_prompted_text_encoder_norm_Transpose_output_0", "text_mask" };
uint32_t dimensions_text_emb[] = {1, 128, 256};
Qnn_Tensor_t outputs__speech_prompted_text_encoder_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "text_emb", .type= QNN_TENSOR_TYPE_APP_READ, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0411724708974361f, .offset= -159}}}, .rank= 3, .dimensions=dimensions_text_emb, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_speech_prompted_text_encoder_Mul", // Node Name
"qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__speech_prompted_text_encoder_Mul, // Node Params 1, // Num Node Params inputs__speech_prompted_text_encoder_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__speech_prompted_text_encoder_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } QNN_API ModelError_t QnnModel_composeGraphs(Qnn_BackendHandle_t backendHandle, QNN_INTERFACE_VER_TYPE interface, Qnn_ContextHandle_t contextHandle, const GraphConfigInfo_t** graphsConfigInfo, const uint32_t numGraphsConfigInfo, GraphInfoPtr_t** graphsInfo, uint32_t* numGraphsInfo, bool debug, QnnLog_Callback_t logCallback, QnnLog_Level_t maxLogLevel) { ModelError_t err = MODEL_NO_ERROR; /* model/graph for text_encoder_htp*/ QnnModel text_encoder_htp; const QnnGraph_Config_t** graphConfigs = nullptr; VALIDATE(getQnnGraphConfigFromInfo("text_encoder_htp", graphsConfigInfo, numGraphsConfigInfo, graphConfigs), err); VALIDATE(text_encoder_htp.initialize(backendHandle, interface, contextHandle, "text_encoder_htp", debug, DO_GRAPH_NODE_VALIDATIONS, graphConfigs), err); VALIDATE(addTensor_text_ids(text_encoder_htp), err); VALIDATE(addTensor_style_ttl(text_encoder_htp), err); VALIDATE(addTensor_text_mask(text_encoder_htp), err); VALIDATE(addNode_style_ttl_ncf(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_pre_reshape(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_pre_reshape(text_encoder_htp), err); VALIDATE(addNode_text_mask_ncf(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_text_embedder_char_embedder_weight(text_encoder_htp), err); VALIDATE(addNode__text_encoder_text_embedder_char_embedder_Gather(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Unsqueeze(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Unsqueeze_1(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_text_embedder_Transpose(text_encoder_htp), err); VALIDATE(addTensor_onnx__MatMul_3680(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_attention1_W_value_linear_bias(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_W_value_linear_MatMul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_W_value_linear_MatMul_post_reshape(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Unsqueeze_6(text_encoder_htp), err); VALIDATE(addTensor_onnx__MatMul_3684(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_attention2_W_value_linear_bias(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_W_value_linear_MatMul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_W_value_linear_MatMul_post_reshape(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_text_embedder_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_text_embedder_Mul_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Split_2(text_encoder_htp), err); VALIDATE(addTensor__speech_prompted_text_encoder_attention1_Constant_11_output_0(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Equal(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Split_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Equal(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_Mul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Unsqueeze_4(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Unsqueeze_5(text_encoder_htp), err); 
VALIDATE(addNode__speech_prompted_text_encoder_attention2_Unsqueeze_4(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Unsqueeze_5(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Concat_2(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Concat_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Pad(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_dwconv_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_dwconv_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_Mul_1(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_norm_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_norm_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__elementwiseneuron_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_pwconv2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_0_gamma(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_Add(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_0_Mul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Pad(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_dwconv_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_dwconv_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_Mul_1(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_norm_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_norm_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_2d(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__elementwiseneuron_6(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_pwconv2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_1_gamma(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_Add(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_1_Mul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Pad(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_dwconv_weight(text_encoder_htp), err); 
VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_dwconv_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_Mul_1(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_norm_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_norm_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__elementwiseneuron_8(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); 
VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_pwconv2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_2_gamma(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_Add(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_2_Mul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Pad(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_dwconv_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_dwconv_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Conv_intermediate(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_Mul_1(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_norm_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_norm_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__elementwiseneuron_10(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_pwconv2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_3_gamma(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_Add(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_3_Mul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Pad(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_dwconv_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_dwconv_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_Mul_1(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_norm_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_norm_norm_LayerNormalization(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__elementwiseneuron_12(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_pwconv2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_4_gamma(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_4_Add(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_convnext_convnext_4_Mul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Pad(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_dwconv_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_dwconv_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_Mul_1(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_norm_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_norm_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_weight(text_encoder_htp), err); 
VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__elementwiseneuron_14(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_pwconv2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_convnext_convnext_5_gamma(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_Add(text_encoder_htp), err); VALIDATE(addNode__text_encoder_convnext_convnext_5_Mul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Mul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Mul_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); 
VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_q_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_k_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_v_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Transpose(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Transpose_1(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_0_Constant_10_output_0(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Div(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_MatMul(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_7(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_10(text_encoder_htp), err); VALIDATE(addNode_Slice_0(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Add_2(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_0_Constant_88_output_0(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Where(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Softmax(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_13(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_16(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Slice_8(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_MatMul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Add_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Transpose_9(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_Reshape_19(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_0_conv_o_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_0_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_Relu(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_0_conv_2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_0_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add_1(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_0_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_q_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_k_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_v_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Transpose(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Transpose_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Div(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_MatMul(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_7(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_10(text_encoder_htp), err); VALIDATE(addNode_Slice_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Add_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Where(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Softmax(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_13(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_16(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Slice_8(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_MatMul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Add_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Transpose_9(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_Reshape_19(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_1_conv_o_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add_2(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_1_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization(text_encoder_htp), 
err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_Relu(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_1_conv_2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_1_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add_3(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_1_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_q_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_q_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_k_bias(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_k_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_v_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_v_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Transpose(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Transpose_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Div(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_MatMul(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_2_Transpose_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_1_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_1_output_0_nchw(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_7(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_7_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_2_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_10(text_encoder_htp), err); VALIDATE(addNode_Slice_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Add_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Where(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Softmax(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Softmax_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_13(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_13_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Pad_4_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_16(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Slice_8(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_2_Unsqueeze_41_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_MatMul_3(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Add_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Transpose_9(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_Reshape_19(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_2_conv_o_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_2_conv_o_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add_4(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_2_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_1_2_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_weight(text_encoder_htp), err); 
VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_Relu(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_Relu_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_2_conv_2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_conv_2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_2_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add_5(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_weight(text_encoder_htp), err); 
VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_2_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_2_2_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_2_2_Transpose_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_q_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_q_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_k_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_k_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_v_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_v_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Transpose(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Transpose_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Div(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_MatMul(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_3_Transpose_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_1_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_1_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_7(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_7_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_2_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_10(text_encoder_htp), err); 
VALIDATE(addNode_Slice_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Add_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Where(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Softmax(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Softmax_output_0_nhwc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_3_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_13(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_13_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Pad_4_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_16(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Slice_8(text_encoder_htp), err); VALIDATE(addTensor__text_encoder_attn_encoder_attn_layers_3_Unsqueeze_41_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_MatMul_3(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Add_4(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Transpose_9(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_Reshape_19(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_weight(text_encoder_htp), err); 
VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_attn_layers_3_conv_o_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_attn_layers_3_conv_o_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add_6(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_1_3_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_1_3_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_Mul(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_1_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_1_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_Relu(text_encoder_htp), err); 
VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_Relu_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_1(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_1_output_0_ncf(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_reshape_to_2d_nhwc(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_ffn_layers_3_conv_2_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_2d(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate_nchw(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_intermediate(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_conv_2_Conv_output_0_nfc(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_ffn_layers_3_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Add_7(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_weight(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_text_encoder_attn_encoder_norm_layers_2_3_norm_bias(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_norm_layers_2_3_norm_LayerNormalization(text_encoder_htp), err); VALIDATE(addNode__text_encoder_attn_encoder_Mul_2(text_encoder_htp), err); VALIDATE(addNode__text_encoder_Add(text_encoder_htp), err); VALIDATE(addNode__text_encoder_proj_out_Mul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_pre_reshape(text_encoder_htp), err); VALIDATE(addTensor_onnx__MatMul_3678(text_encoder_htp), 
err); VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_attention1_W_query_linear_bias(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_W_query_linear_MatMul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_W_query_linear_MatMul_post_reshape(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Split(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Unsqueeze(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Unsqueeze_1(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Concat(text_encoder_htp), err); VALIDATE(addTensor__speech_prompted_text_encoder_attention1_tanh_Tanh_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_MatMul(text_encoder_htp), err); VALIDATE(addTensor__speech_prompted_text_encoder_attention1_Constant_9_output_0(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Div(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Softmax(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Where(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_MatMul_1(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Split_3(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Concat_3(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Squeeze(text_encoder_htp), err); VALIDATE(addTensor_onnx__MatMul_3681(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_attention1_out_fc_linear_bias(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul(text_encoder_htp), err); 
VALIDATE(addNode__speech_prompted_text_encoder_attention1_out_fc_linear_MatMul_post_reshape(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention1_Mul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_Add(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_pre_reshape(text_encoder_htp), err); VALIDATE(addTensor_onnx__MatMul_3682(text_encoder_htp), err); VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_attention2_W_query_linear_bias(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_W_query_linear_MatMul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_W_query_linear_MatMul_post_reshape(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Split(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Unsqueeze(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Unsqueeze_1(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Concat(text_encoder_htp), err); VALIDATE(addTensor__speech_prompted_text_encoder_attention2_tanh_Tanh_output_0_nchw(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_MatMul(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Div(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Softmax(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Where(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_MatMul_1(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Split_3(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Concat_3(text_encoder_htp), err); VALIDATE(addNode__speech_prompted_text_encoder_attention2_Squeeze(text_encoder_htp), err); 
// --- Tail of the graph-preparation entry point (opened earlier in this file) ---
// Registers the last tensors/nodes of the speech-prompted text encoder's
// "attention2" output projection (MatMul weight onnx__MatMul_3685 + out_fc bias,
// the MatMul and its post-reshape), the residual Add_1, the final
// LayerNormalization weight/bias/node, and the closing Mul, then packages the
// fully composed graph for the caller.
VALIDATE(addTensor_onnx__MatMul_3685(text_encoder_htp), err);
VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_attention2_out_fc_linear_bias(text_encoder_htp), err);
VALIDATE(addNode__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul(text_encoder_htp), err);
VALIDATE(addNode__speech_prompted_text_encoder_attention2_out_fc_linear_MatMul_post_reshape(text_encoder_htp), err);
VALIDATE(addNode__speech_prompted_text_encoder_attention2_Mul(text_encoder_htp), err);
VALIDATE(addNode__speech_prompted_text_encoder_Add_1(text_encoder_htp), err);
VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_norm_norm_weight(text_encoder_htp), err);
VALIDATE(addTensor_tts_ttl_speech_prompted_text_encoder_norm_norm_bias(text_encoder_htp), err);
VALIDATE(addNode__speech_prompted_text_encoder_norm_norm_LayerNormalization(text_encoder_htp), err);
VALIDATE(addNode__speech_prompted_text_encoder_Mul(text_encoder_htp), err);

// Add all models to array to get graphsInfo
// (this export composes exactly one graph: text_encoder_htp)
QnnModel* models [] = {&text_encoder_htp};
uint32_t numModels = 1;

// Populate the constructed graphs in provided output variables
// getGraphInfoFromModels copies each model's graph description into the
// caller-owned graphsInfo array; on success numGraphsInfo reports the count.
VALIDATE(getGraphInfoFromModels(*models, numModels, graphsInfo), err);
*numGraphsInfo = numModels;

return err;
} // PREPARE_GRAPHS

// Thin C-ABI wrapper exposed to the QNN runtime loader: releases the
// GraphInfo array previously produced by the prepare-graphs entry point by
// delegating to qnn_wrapper_api::freeGraphsInfo. Safe to export with C
// linkage because it takes/returns only C-compatible types.
QNN_API ModelError_t QnnModel_freeGraphsInfo(GraphInfoPtr_t** graphsInfo, uint32_t numGraphsInfo){
  return qnn_wrapper_api::freeGraphsInfo(graphsInfo, numGraphsInfo);
} // FREEGRAPHINFO

} // extern "C"