/* COPYRIGHT HEADER GOES HERE: No CopyRight Header String Passed During Model Conversion */ /* Command Line used: qnn-onnx-converter; act_bitwidth=8; act_quantizer=tf; act_quantizer_calibration=min-max; act_quantizer_schema=asymmetric; adjust_nms_features_dims=True; algorithms=[]; align_matmul_ranks=True; apply_masked_softmax=uncompressed; arch_checker=False; backend=None; batch=None; bias_bitwidth=8; calc_static_encodings=False; converter_op_package_lib=; copyright_file=None; custom_io=; custom_op_config_paths=None; debug=-1; defer_loading=False; define_symbol=None; disable_batchnorm_folding=False; disable_defer_loading=False; disable_node_validation=False; disable_qnn_op_config_validation=False; disable_relu_squashing=False; dry_run=None; dumpIR=False; dump_custom_io_config_template=; dump_encoding_json=False; dump_inferred_model=False; dump_qairt_io_config_yaml=; dump_qairt_quantizer_command=None; dump_value_info=False; enable_framework_trace=False; enable_match_gathernd=False; enable_match_topk=False; enable_per_row_quantized_bias=False; exclude_named_tensors=False; expand_gru_op_structure=True; expand_lstm_op_structure=False; expand_sparse_op_structure=False; export_format=cpp; extract_color_transform=True; float_bias_bitwidth=0; float_bias_bw=0; float_bitwidth=32; float_bw=32; float_fallback=False; force_prune_cast_ops=False; handle_gather_negative_indices=True; ignore_encodings=False; include_data_invariant_ops=False; inject_cast_for_gather=True; input_dim=[['latent', '1,144,192']]; input_dtype=[]; input_encoding=[]; input_layout=[]; input_list=./calibration_data/vocoder_input_list.txt; input_type=[]; keep_disconnected_nodes=False; keep_int64_inputs=False; keep_quant_nodes=False; keep_weights_quantized=False; match_caffe_ssd_to_tf=True; model_version=None; multi_time_steps_gru=False; multi_time_steps_lstm=False; no_simplification=False; op_package_lib=; out_names=['wav_tts']; overwrite_model_prefix=False; pack_4_bit_weights=False; package_name=None; 
packed_masked_softmax_inputs=[]; packed_max_seq=1; param_quantizer=tf; param_quantizer_calibration=min-max; param_quantizer_schema=asymmetric; percentile_calibration_value=99.99; perform_axes_to_spatial_first_order=True; perform_layout_transformation=False; prepare_inputs_as_params=False; preprocess_roi_pool_inputs=True; preserve_io=[]; preserve_onnx_output_order=False; quantization_overrides=; quantizer_log=None; restrict_quantization_steps=[]; squash_box_decoder=True; unroll_gru_time_steps=True; unroll_lstm_time_steps=True; use_aimet_quantizer=False; use_convert_quantization_nodes=False; use_dynamic_16_bit_weights=False; use_native_dtype=False; use_native_input_files=False; use_native_output_files=False; use_per_channel_quantization=False; use_per_row_quantization=False; use_quantize_v2=False; validate_models=False; weights_bitwidth=8 */
#include "QnnOpDef.h"
#include "QnnModel.hpp"
// Flag to determine if Backend should do node validation for each opNode added
#define DO_GRAPH_NODE_VALIDATIONS 1
using namespace qnn_wrapper_api;
// Converter/SDK version stamp, exported with default visibility so external
// tooling can query it from the compiled model library.
const __attribute__((visibility("default"))) char* QNN_SDK_VERSION = "qaisw-v2.37.1.250807093845_124904";
extern "C" {
// Registers the graph input tensor "latent": asymmetric-quantized uint8
// (scale/offset encoding), rank 3, dims {1, 192, 144}.
// QNN_TENSOR_TYPE_APP_WRITE means the application binds the data buffer at
// execute time, hence the null clientBuf here.
// NOTE(review): the converter command line declared input_dim latent=1,144,192;
// the last two axes are stored swapped here -- presumably the converter's
// axes-to-spatial-first-order pass did this. Confirm before editing the dims.
static ModelError_t addTensor_latent(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_latent[] = {1, 192, 144};
  VALIDATE(model.addTensor("latent", // Tensor Name
           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
             {.v2= { .id=0, .name= "latent", .type= QNN_TENSOR_TYPE_APP_WRITE,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0277016554027796f, .offset= -129}}},
               .rank= 3,
               .dimensions=dimensions_latent,
               .memType= QNN_TENSORMEMTYPE_RAW,
               // No client buffer for a graph input: data is bound at execute time.
               {.clientBuf= { .data=nullptr, .dataSize=0}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return
err;
}
// Registers the static single-element tensor "tts_ttl_normalizer_scale"
// (uint8, scale 0.00098..., offset 0). Data is embedded in the model binary
// blob and referenced via BINVARSTART/BINLEN. Consumed as the second input
// (divisor) of the "_Div" node below.
static ModelError_t addTensor_tts_ttl_normalizer_scale(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_normalizer_scale[] = {1};
  VALIDATE(model.addTensor("tts_ttl_normalizer_scale", // Tensor Name
           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
             {.v2= { .id=0, .name= "tts_ttl_normalizer_scale", .type= QNN_TENSOR_TYPE_STATIC,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009803922148421f, .offset= 0}}},
               .rank= 1,
               .dimensions=dimensions_tts_ttl_normalizer_scale,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=BINVARSTART(tts_ttl_normalizer_scale), .dataSize=BINLEN(tts_ttl_normalizer_scale)}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}
// Adds node "_Div": ElementWiseBinary over {"latent", "tts_ttl_normalizer_scale"}
// with broadcasting ({1,192,144} op {1} -> {1,192,144}).
// operation=2 selects the binary-op variant -- presumably DIVIDE given the
// ONNX node name; confirm against the ElementWiseBinary enum in QnnOpDef.h.
static ModelError_t addNode__Div(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Div */
  Qnn_Param_t params__Div[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
  };
  const char* inputs__Div[] = { "latent", "tts_ttl_normalizer_scale" };
  uint32_t dimensions__Div_output_0[] = {1, 192, 144};
  Qnn_Tensor_t outputs__Div[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1108066216111183f, .offset= -129}}},
        .rank= 3,
        .dimensions=dimensions__Div_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Div", // Node Name
           "qti.aisw", // Package Name
           "ElementWiseBinary", // Qnn Node Type
           params__Div, // Node Params
           1, // Num Node Params
           inputs__Div, // Input Tensor Names
           2, // Num Input Tensor Names
           outputs__Div, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_Div_output_0_ncf": Transpose {1,192,144} -> {1,144,192} with
// perm {0,2,1} (layout shuffle inserted by the converter; "ncf" suffix is
// converter-generated).
static ModelError_t addNode__Div_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Div_output_0_ncf */
  uint32_t dimensions__Div_output_0_ncf_perm[] = {3};
  uint32_t _Div_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__Div_output_0_ncf[] = {
    {.paramType=QN N_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_Div_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__Div_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 3 x uint32 = 12 bytes
         {.clientBuf= { .data=(uint8_t*)_Div_output_0_ncf_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__Div_output_0_ncf[] = { "_Div_output_0" };
  uint32_t dimensions__Div_output_0_ncf[] = {1, 144, 192};
  Qnn_Tensor_t outputs__Div_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Div_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1108066216111183f, .offset= -129}}},
        .rank= 3,
.dimensions=dimensions__Div_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Div_output_0_ncf", // Node Name
           "qti.aisw", // Package Name
           "Transpose", // Qnn Node Type
           params__Div_output_0_ncf, // Node Params
           1, // Num Node Params
           inputs__Div_output_0_ncf, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__Div_output_0_ncf, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_Reshape": {1,144,192} -> {1,24,6,192}, splitting the 144 axis
// into 24 x 6. Reshape carries no params; the target shape comes from the
// output tensor's dimensions.
static ModelError_t addNode__Reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Reshape */
  const char* inputs__Reshape[] = { "_Div_output_0_ncf" };
  uint32_t dimensions__Reshape_output_0[] = {1, 24, 6, 192};
  Qnn_Tensor_t outputs__Reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1108066216111183f, .offset= -129}}},
        .rank= 4,
        .dimensions=dimensions__Reshape_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Reshape", // Node Name
           "qti.aisw", // Package Name
           "Reshape", // Qnn Node Type
           nullptr, // Node Params
           0, // Num Node Params
           inputs__Reshape, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__Reshape, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_Transpose": {1,24,6,192} -> {1,24,192,6} with perm {0,1,3,2}.
static ModelError_t addNode__Transpose(QnnModel&
model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Transpose */
  uint32_t dimensions__Transpose_perm[] = {4};
  uint32_t _Transpose_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 4 x uint32 = 16 bytes
         {.clientBuf= { .data=(uint8_t*)_Transpose_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__Transpose[] = { "_Reshape_output_0" };
  uint32_t dimensions__Transpose_output_0[] = {1, 24, 192, 6};
  Qnn_Tensor_t outputs__Transpose[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1108066216111183f, .offset= -129}}},
        .rank= 4,
        .dimensions=dimensions__Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Transpose", // Node Name
           "qti.aisw", // Package Name
           "Transpose", // Qnn Node Type
           params__Transpose, // Node Params
           1, // Num Node Params
           inputs__Transpose, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__Transpose, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_Reshape_1": {1,24,192,6} -> {1,24,1152}, flattening the last
// two axes (192 * 6 = 1152).
static ModelError_t addNode__Reshape_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Reshape_1 */
  const char* inputs__Reshape_1[] = { "_Transpose_output_0" };
  uint32_t dimensions__Reshape_1_output_0[] = {1, 24, 1152};
  Qnn_Tensor_t outputs__Reshape_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1108066216111183f, .offset= -129}}},
        .rank= 3,
        .dimensions=dimensions__Reshape_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Reshape_1", // Node Name
           "qti.aisw", // Package Name
           "Reshape", // Qnn Node Type
           nullptr, // Node Params
           0, // Num Node Params
           inputs__Reshape_1, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__Reshape_1, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Registers static tensor "tts_ae_latent_std" ({1,24,1}, uint8, scale
// 0.000415, offset 0); data from the binary blob. Presumably the
// autoencoder latent std used by the _Mul de-normalization -- confirm
// against the source ONNX model.
static ModelError_t addTensor_tts_ae_latent_std(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_latent_std[] = {1, 24, 1};
  VALIDATE(model.addTensor("tts_ae_latent_std", // Tensor Name
           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
             {.v2= { .id=0, .name= "tts_ae_latent_std", .type= QNN_TENSOR_TYPE_STATIC,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004149454180151f, .offset= 0}}},
               .rank= 3,
.dimensions=dimensions_tts_ae_latent_std,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=BINVARSTART(tts_ae_latent_std), .dataSize=BINLEN(tts_ae_latent_std)}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}
// Adds node "_Mul": ElementWiseBinary {1,24,1152} x {1,24,1} with broadcast
// over the last axis. operation=13 -- presumably MULTIPLY given the ONNX
// node name; confirm against the ElementWiseBinary enum in QnnOpDef.h.
static ModelError_t addNode__Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Mul */
  Qnn_Param_t params__Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__Mul[] = { "_Reshape_1_output_0", "tts_ae_latent_std" };
  uint32_t dimensions__Mul_output_0[] = {1, 24, 1152};
  Qnn_Tensor_t outputs__Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0116167189553380f, .offset= -128}}},
        .rank= 3,
        .dimensions=dimensions__Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Mul", // Node Name
           "qti.aisw", // Package Name
           "ElementWiseBinary", // Qnn Node Type
           params__Mul, // Node Params
           1, // Num Node Params
           inputs__Mul, // Input Tensor Names
           2, // Num Input Tensor Names
           outputs__Mul, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Registers static tensor "tts_ae_latent_mean" ({1,24,1}, uint8, scale
// 0.00660, offset -97); data from the binary blob. Added to the scaled
// latent by the "_Add" node below.
static ModelError_t addTensor_tts_ae_latent_mean(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_latent_mean[] = {1, 24, 1};
  VALIDATE(model.addTensor("tts_ae_latent_mean", // Tensor Name
           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
             {.v2= { .id=0, .name= "tts_ae_latent_mean", .type= QNN_TENSOR_TYPE_STATIC,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066031073220074f, .offset= -97}}},
               .rank= 3,
               .dimensions=dimensions_tts_ae_latent_mean,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=BINVARSTART(tts_ae_latent_mean), .dataSize=BINLEN(tts_ae_latent_mean)}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}
// Adds node "_Add": ElementWiseBinary {1,24,1152} + {1,24,1} (broadcast).
// operation=0 -- presumably ADD given the ONNX node name; confirm against
// the ElementWiseBinary enum in QnnOpDef.h.
static ModelError_t addNode__Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Add */
  Qnn_Param_t params__Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__Add[] = { "_Mul_output_0", "tts_ae_latent_mean" };
  uint32_t dimensions__Add_output_0[] = {1, 24, 1152};
  Qnn_Tensor_t outputs__Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133755011484027f, .offset= -67}}},
        .rank= 3,
        .dimensions=dimensions__Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Add", // Node Name
           "qti.aisw", // Package Name
           "ElementWiseBinary", // Qnn Node Type
           params__Add, // Node Params
           1, // Num Node Params
           inputs__Add, // Input Tensor Names
           2, // Num Input Tensor Names
           outputs__Add, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_Add_output_0_nfc": Transpose {1,24,1152} -> {1,1152,24} with
// perm {0,2,1}. Output quantization matches its input (pure data movement).
static ModelError_t addNode__Add_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Add_output_0_nfc */
  uint32_t dimensions__Add_output_0_nfc_perm[] = {3};
  uint32_t _Add_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__Add_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_Add_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__Add_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_Add_output_0_nfc_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__Add_output_0_nfc[] = { "_Add_output_0" };
  uint32_t dimensions__Add_output_0_nfc[] = {1, 1152, 24};
  Qnn_Tensor_t outputs__Add_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_Add_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133755011484027f, .offset= -67}}},
        .rank= 3,
        .dimensions=dimensions__Add_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_Add_output_0_nfc", // Node Name
           "qti.aisw", // Package Name
           "Transpose", // Qnn Node Type
           params__Add_output_0_nfc, // Node Params
           1, // Num Node Params
           inputs__Add_output_0_nfc, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__Add_output_0_nfc, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_decoder_embed_Pad": pads axis 1 of {1,1152,24} with 6 elements
// before (none after) -> {1,1158,24}. pad_amount is {before,after} pairs per
// axis: {0,0, 6,0, 0,0}. scheme=3 is a converter-emitted Pad scheme enum
// value (constant/edge/reflect/...); confirm the exact mode in QnnOpDef.h.
static ModelError_t addNode__decoder_embed_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_embed_Pad */
  uint32_t dimensions__decoder_embed_Pad_pad_amount[] = {3, 2};
  uint32_t _decoder_embed_Pad_pad_amount[] = {0, 0, 6, 0, 0, 0};
  Qnn_Param_t params__decoder_embed_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_embed_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__decoder_embed_Pad_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 6 x uint32 = 24 bytes
         {.clientBuf= { .data=(uint8_t*)_decoder_embed_Pad_pad_amount, .dataSize=24}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__decoder_embed_Pad[] = { "_Add_output_0_nfc" };
  uint32_t dimensions__decoder_embed_Pad_output_0[] = {1, 1158, 24};
  Qnn_Tensor_t outputs__decoder_embed_Pad[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_embed_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133755011484027f, .offset= -67}}},
        .rank= 3,
        .dimensions=dimensions__decoder_embed_Pad_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_decoder_embed_Pad", // Node Name
           "qti.aisw", // Package Name
           "Pad", // Qnn Node Type
           params__decoder_embed_Pad, // Node Params
           2, // Num Node Params
           inputs__decoder_embed_Pad, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__decoder_embed_Pad, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_decoder_embed_Pad_output_0_ncf": Transpose {1,1158,24} ->
// {1,24,1158} with perm {0,2,1}.
static ModelError_t addNode__decoder_embed_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_embed_Pad_output_0_ncf */
  uint32_t dimensions__decoder_embed_Pad_output_0_ncf_perm[] = {3};
  uint32_t _decoder_embed_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__decoder_embed_Pad_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_embed_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_embed_Pad_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_embed_Pad_output_0_ncf_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char*
inputs__decoder_embed_Pad_output_0_ncf[] = { "_decoder_embed_Pad_output_0" };
  uint32_t dimensions__decoder_embed_Pad_output_0_ncf[] = {1, 24, 1158};
  Qnn_Tensor_t outputs__decoder_embed_Pad_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_embed_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133755011484027f, .offset= -67}}},
        .rank= 3,
        .dimensions=dimensions__decoder_embed_Pad_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_decoder_embed_Pad_output_0_ncf", // Node Name
           "qti.aisw", // Package Name
           "Transpose", // Qnn Node Type
           params__decoder_embed_Pad_output_0_ncf, // Node Params
           1, // Num Node Params
           inputs__decoder_embed_Pad_output_0_ncf, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__decoder_embed_Pad_output_0_ncf, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_decoder_embed_net_Conv_reshape_to_2d": lifts the rank-3 tensor
// {1,24,1158} to rank-4 {1,24,1,1158} so the 1-D convolution can execute as
// a Conv2d with height 1.
static ModelError_t addNode__decoder_embed_net_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_embed_net_Conv_reshape_to_2d */
  const char* inputs__decoder_embed_net_Conv_reshape_to_2d[] = { "_decoder_embed_Pad_output_0_ncf" };
  uint32_t dimensions__decoder_embed_net_Conv_reshape_to_2d[] = {1, 24, 1, 1158};
  Qnn_Tensor_t outputs__decoder_embed_net_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_embed_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133755011484027f, .offset= -67}}},
        .rank= 4,
        .dimensions=dimensions__decoder_embed_net_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_decoder_embed_net_Conv_reshape_to_2d", // Node Name
           "qti.aisw", // Package Name
           "Reshape", // Qnn Node Type
           nullptr, // Node Params
           0, // Num Node Params
           inputs__decoder_embed_net_Conv_reshape_to_2d, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__decoder_embed_net_Conv_reshape_to_2d, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Adds node "_decoder_embed_net_Conv_reshape_to_2d_nhwc": Transpose
// {1,24,1,1158} -> {1,1,1158,24} with perm {0,2,3,1}, i.e. channel-first to
// channel-last ahead of the Conv2d node.
static ModelError_t addNode__decoder_embed_net_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_embed_net_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__decoder_embed_net_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _decoder_embed_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__decoder_embed_net_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_embed_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_embed_net_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_embed_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_embed_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_embed_net_Conv_reshape_to_2d" };
  uint32_t dimensions__decoder_embed_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1158, 24};
  Qnn_Tensor_t outputs__decoder_embed_net_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_embed_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133755011484027f, .offset= -67}}},
        .rank= 4,
        .dimensions=dimensions__decoder_embed_net_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
           "_decoder_embed_net_Conv_reshape_to_2d_nhwc", // Node Name
           "qti.aisw", // Package Name
           "Transpose", // Qnn Node Type
           params__decoder_embed_net_Conv_reshape_to_2d_nhwc, // Node Params
           1, // Num Node Params
           inputs__decoder_embed_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names
           1, // Num Input Tensor Names
           outputs__decoder_embed_net_Conv_reshape_to_2d_nhwc, // Output Tensors
           1 // Num Output Tensors
  ), err);
  return err;
}
// Registers static conv weight "onnx__Conv_1440": {1,7,24,512}, quantized
// uint8, data from the binary blob. Presumably HWIO layout (1x7 kernel,
// 24 in-channels, 512 out-channels) as expected by the Conv2d node below --
// confirm against the QNN Conv2d weight-layout definition.
static ModelError_t addTensor_onnx__Conv_1440(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__Conv_1440[] = {1, 7, 24, 512};
  VALIDATE(model.addTensor("onnx__Conv_1440", // Tensor Name
           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
             {.v2= { .id=0, .name= "onnx__Conv_1440", .type= QNN_TENSOR_TYPE_STATIC,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
               .dataType=
QNN_DATATYPE_UFIXED_POINT_8,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0289815925061703f, .offset= -151}}},
               .rank= 4,
               .dimensions=dimensions_onnx__Conv_1440,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=BINVARSTART(onnx__Conv_1440), .dataSize=BINLEN(onnx__Conv_1440)}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}
// Registers static tensor "onnx__Conv_1441": {512}, quantized uint8, data
// from the binary blob; passed as the third (bias) input of the Conv2d node.
static ModelError_t addTensor_onnx__Conv_1441(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__Conv_1441[] = {512};
  VALIDATE(model.addTensor("onnx__Conv_1441", // Tensor Name
           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
             {.v2= { .id=0, .name= "onnx__Conv_1441", .type= QNN_TENSOR_TYPE_STATIC,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0450373366475105f, .offset= -158}}},
               .rank= 1,
               .dimensions=dimensions_onnx__Conv_1441,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=BINVARSTART(onnx__Conv_1441), .dataSize=BINLEN(onnx__Conv_1441)}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}
// Adds node "_decoder_embed_net_Conv_2d" (definition continues beyond this
// chunk): Conv2d of the NHWC activation {1,1,1158,24} with weights
// onnx__Conv_1440 and bias onnx__Conv_1441; stride 1x1, dilation 1x1, no
// padding, so output width is 1158 - 7 + 1 = 1152.
static ModelError_t addNode__decoder_embed_net_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_embed_net_Conv_2d */
  uint32_t dimensions__decoder_embed_net_Conv_2d_dilation[] = {2};
  uint32_t _decoder_embed_net_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__decoder_embed_net_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_embed_net_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_embed_net_Conv_2d_stride[] = {2};
  uint32_t _decoder_embed_net_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t
params__decoder_embed_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_embed_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_embed_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_embed_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_embed_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= 
{.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_embed_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_embed_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_embed_net_Conv_2d[] = { "_decoder_embed_net_Conv_reshape_to_2d_nhwc", "onnx__Conv_1440", "onnx__Conv_1441" }; uint32_t dimensions__decoder_embed_net_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_embed_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275299660861492f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_embed_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_embed_net_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_embed_net_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_embed_net_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_embed_net_Conv_2d, // Output Tensors 
1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_embed_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_embed_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_embed_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_embed_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_embed_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_embed_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_embed_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_embed_net_Conv_intermediate_nchw[] = { "_decoder_embed_net_Conv_intermediate" }; uint32_t dimensions__decoder_embed_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_embed_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275299660861492f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_embed_net_Conv_intermediate_nchw, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_embed_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_embed_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_embed_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_embed_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_embed_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_embed_net_Conv_intermediate */ const char* inputs__decoder_embed_net_Conv_intermediate[] = { "_decoder_embed_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_embed_net_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_embed_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275299660861492f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_embed_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_embed_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn 
Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_embed_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_embed_net_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_embed_net_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_embed_net_Conv_output_0_nfc */ uint32_t dimensions__decoder_embed_net_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_embed_net_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_embed_net_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_embed_net_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_embed_net_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_embed_net_Conv_output_0_nfc[] = { "_decoder_embed_net_Conv_output_0" }; uint32_t dimensions__decoder_embed_net_Conv_output_0_nfc[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_embed_net_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_embed_net_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0275299660861492f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_embed_net_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_embed_net_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_embed_net_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__decoder_embed_net_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_embed_net_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_dwconv_Pad */ uint32_t dimensions__decoder_convnext_0_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _decoder_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 6, 0, 0, 0}; Qnn_Param_t params__decoder_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__decoder_convnext_0_dwconv_Pad[] = { "_decoder_embed_net_Conv_output_0_nfc" }; uint32_t dimensions__decoder_convnext_0_dwconv_Pad_output_0[] = {1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275299660861492f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__decoder_convnext_0_dwconv_Pad, // Node Params 2, // Num Node Params inputs__decoder_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_dwconv_Pad_output_0_ncf */ uint32_t dimensions__decoder_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, 
.name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__decoder_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 1158}; Qnn_Tensor_t outputs__decoder_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275299660861492f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
params__decoder_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_dwconv_net_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1158}; Qnn_Tensor_t outputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275299660861492f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } 
static ModelError_t addNode__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0275299660861492f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_dwconv_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_dwconv_net_weight[] = {1, 7, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_dwconv_net_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0086430134251714f, .offset= -124}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_0_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_0_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, 
.isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_dwconv_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_dwconv_net_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_dwconv_net_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005409428849816f, .offset= -94}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_0_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_0_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_0_dwconv_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_dwconv_net_Conv_2d */ uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_0_dwconv_net_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_0_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_0_dwconv_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_0_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_convnext_0_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_dwconv_net_Conv_2d[] = { "_decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_0_dwconv_net_weight", "tts_ae_decoder_convnext_0_dwconv_net_bias" }; uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_dwconv_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0068576037883759f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_dwconv_net_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__decoder_convnext_0_dwconv_net_Conv_2d, // Node Params 3, // Num Node Params inputs__decoder_convnext_0_dwconv_net_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_0_dwconv_net_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_dwconv_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_0_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_0_dwconv_net_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0068576037883759f, .offset= 
-128}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_dwconv_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_dwconv_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_dwconv_net_Conv_intermediate */ const char* inputs__decoder_convnext_0_dwconv_net_Conv_intermediate[] = { "_decoder_convnext_0_dwconv_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_0_dwconv_net_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_dwconv_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0068576037883759f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_dwconv_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_0_dwconv_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_dwconv_net_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_norm_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_norm_Transpose */ uint32_t dimensions__decoder_convnext_0_norm_Transpose_perm[] = {3}; uint32_t _decoder_convnext_0_norm_Transpose_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_0_norm_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_norm_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_norm_Transpose[] = { "_decoder_convnext_0_dwconv_net_Conv_output_0" }; uint32_t dimensions__decoder_convnext_0_norm_Transpose_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_norm_Transpose[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0068576037883759f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_norm_Transpose", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_norm_Transpose, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_norm_Transpose, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_norm_Transpose, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026673187967390f, .offset= -3}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_norm_norm_weight), 
.dataSize=BINLEN(tts_ae_decoder_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032730265520513f, .offset= -128}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_norm_norm_LayerNormalization */ uint32_t dimensions__decoder_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _decoder_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__decoder_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__decoder_convnext_0_norm_norm_LayerNormalization[] = { "_decoder_convnext_0_norm_Transpose_output_0", "tts_ae_decoder_convnext_0_norm_norm_weight", "tts_ae_decoder_convnext_0_norm_norm_bias" }; uint32_t dimensions__decoder_convnext_0_norm_Transpose_1_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0247755870223045f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package 
Name "LayerNorm", // Qnn Node Type params__decoder_convnext_0_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__decoder_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_0_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__decoder_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_0_norm_Transpose_1_output_0" }; uint32_t dimensions__decoder_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, 
.name= "_decoder_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0247755870223045f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv1_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_decoder_convnext_0_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0247755870223045f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_0_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0247755870223045f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_pwconv1_weight[] = {1, 1, 512, 2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_pwconv1_weight", // 
Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0096003618091345f, .offset= -143}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_pwconv1_bias[] = {2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014546492602676f, .offset= -225}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv1_Conv_2d(QnnModel& model){ ModelError_t 
err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv1_Conv_2d */ uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_decoder_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_0_pwconv1_Conv_2d[] = { "_decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_0_pwconv1_weight", "tts_ae_decoder_convnext_0_pwconv1_bias" }; uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0561055168509483f, .offset= -162}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_0_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_0_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_decoder_convnext_0_pwconv1_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0561055168509483f, .offset= -162}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv1_Conv_intermediate */ const char* inputs__decoder_convnext_0_pwconv1_Conv_intermediate[] = { 
"_decoder_convnext_0_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_0_pwconv1_Conv_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0561055168509483f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_0 */ Qnn_Param_t params__elementwiseneuron_0[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_0[] = { "_decoder_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__decoder_convnext_0_act_Mul_1_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__elementwiseneuron_0[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0210306979715824f, .offset= -8}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_0", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_0, // Node Params 1, // Num Node Params inputs__elementwiseneuron_0, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_0, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_decoder_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0210306979715824f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const 
char* inputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0210306979715824f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_pwconv2_weight[] = {1, 1, 2048, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0113332755863667f, .offset= -97}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018022155854851f, .offset= -151}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t 
_decoder_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_0_pwconv2_Conv_2d[] = { "_decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_0_pwconv2_weight", "tts_ae_decoder_convnext_0_pwconv2_bias" }; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1055153757333755f, .offset= -138}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__decoder_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1055153757333755f, .offset= -138}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__decoder_convnext_0_pwconv2_Conv_intermediate[] = { "_decoder_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t 
outputs__decoder_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1055153757333755f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__decoder_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1055153757333755f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addTensor_tts_ae_decoder_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003375311498530f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_0_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_Mul */ Qnn_Param_t params__decoder_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__decoder_convnext_0_Mul[] = { "tts_ae_decoder_convnext_0_gamma", "_decoder_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__decoder_convnext_0_Mul_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0051466901786625f, .offset= -133}}}, .rank= 3, 
.dimensions=dimensions__decoder_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_0_Mul, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_0_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_0_Add */ Qnn_Param_t params__decoder_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__decoder_convnext_0_Add[] = { "_decoder_embed_net_Conv_output_0_nfc", "_decoder_convnext_0_Mul_output_0" }; uint32_t dimensions__decoder_convnext_0_Add_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0269599910825491f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_0_Add, // Node Params 1, // Num Node Params inputs__decoder_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_dwconv_Pad */ uint32_t dimensions__decoder_convnext_1_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _decoder_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 12, 0, 0, 0}; Qnn_Param_t params__decoder_convnext_1_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_1_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__decoder_convnext_1_dwconv_Pad[] = { "_decoder_convnext_0_Add_output_0" }; uint32_t dimensions__decoder_convnext_1_dwconv_Pad_output_0[] = {1, 1164, 512}; Qnn_Tensor_t outputs__decoder_convnext_1_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0269599910825491f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__decoder_convnext_1_dwconv_Pad, // Node Params 2, // Num Node Params inputs__decoder_convnext_1_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_dwconv_Pad_output_0_ncf */ uint32_t dimensions__decoder_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_1_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__decoder_convnext_1_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_1_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_1_dwconv_Pad_output_0" }; uint32_t dimensions__decoder_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 512, 1164}; Qnn_Tensor_t outputs__decoder_convnext_1_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0269599910825491f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_1_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_1_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1164}; Qnn_Tensor_t outputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0269599910825491f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc[] 
= { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1164, 512}; Qnn_Tensor_t outputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0269599910825491f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_1_dwconv_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_1_dwconv_net_weight[] = {1, 7, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_dwconv_net_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_1_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0081031657755375f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_1_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_1_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_1_dwconv_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_1_dwconv_net_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_dwconv_net_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_1_dwconv_net_bias", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007066628313623f, .offset= -163}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_1_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_1_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_1_dwconv_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_dwconv_net_Conv_2d */ uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_1_dwconv_net_Conv_2d_dilation[] = {1, 2}; uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_1_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_1_dwconv_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_1_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_decoder_convnext_1_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_1_dwconv_net_Conv_2d[] = { 
// NOTE(review): machine-generated QNN graph-construction code (qnn-onnx-converter,
// see file header).  Hand edits will be lost on the next model conversion.
// Only comments and line formatting were added below; every code token is
// unchanged from the generated output.
//
// Tail of addNode__decoder_convnext_1_dwconv_net_Conv_2d (the function opens
// before this chunk): closes the 3-entry input list (activation, weight, bias),
// declares the NHWC {1,1,1152,512} u8 output tensor and registers the
// DepthWiseConv2d node.
"_decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc",
"tts_ae_decoder_convnext_1_dwconv_net_weight",
"tts_ae_decoder_convnext_1_dwconv_net_bias" };
uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_1_dwconv_net_Conv_2d[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_decoder_convnext_1_dwconv_net_Conv_intermediate",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      // 8-bit scale/offset quantization encoding produced by calibration.
      .quantizeParams= { QNN_DEFINITION_DEFINED,
        QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0081018367782235f, .offset= -136}}},
      .rank= 4,
      .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_intermediate,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
        .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_dwconv_net_Conv_2d", // Node Name
  "qti.aisw", // Package Name
  "DepthWiseConv2d", // Qnn Node Type
  params__decoder_convnext_1_dwconv_net_Conv_2d, // Node Params
  3, // Num Node Params
  inputs__decoder_convnext_1_dwconv_net_Conv_2d, // Input Tensor Names
  3, // Num Input Tensor Names
  outputs__decoder_convnext_1_dwconv_net_Conv_2d, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

// Permutes the DepthWiseConv2d output from NHWC {1,1,1152,512} back to NCHW
// {1,512,1,1152} with a Transpose node (static perm tensor {0,3,1,2}).
static ModelError_t addNode__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_1_dwconv_net_Conv_intermediate_nchw */
  uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _decoder_convnext_1_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_1_dwconv_net_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 4 x uint32 perm values = 16 bytes of static client data.
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw[] = {
    "_decoder_convnext_1_dwconv_net_Conv_intermediate"
  };
  uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_1_dwconv_net_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        // Same encoding as the transpose input (layout change only).
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0081018367782235f, .offset= -136}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_1_dwconv_net_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Drops the singleton height axis: Reshape {1,512,1,1152} -> {1,512,1152},
// recovering the original 3-D (N,C,F) activation layout.
static ModelError_t addNode__decoder_convnext_1_dwconv_net_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_1_dwconv_net_Conv_intermediate */
  const char* inputs__decoder_convnext_1_dwconv_net_Conv_intermediate[] = {
    "_decoder_convnext_1_dwconv_net_Conv_intermediate_nchw"
  };
  uint32_t dimensions__decoder_convnext_1_dwconv_net_Conv_output_0[] = {1, 512, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_1_dwconv_net_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_1_dwconv_net_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0081018367782235f, .offset= -136}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_1_dwconv_net_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_1_dwconv_net_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__decoder_convnext_1_dwconv_net_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_1_dwconv_net_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Transpose {1,512,1152} -> {1,1152,512} (perm {0,2,1}) so LayerNorm can
// normalize over the trailing channel axis.
static ModelError_t addNode__decoder_convnext_1_norm_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_1_norm_Transpose */
  uint32_t dimensions__decoder_convnext_1_norm_Transpose_perm[] = {3};
  uint32_t _decoder_convnext_1_norm_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__decoder_convnext_1_norm_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_1_norm_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_1_norm_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 3 x uint32 perm values = 12 bytes of static client data.
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_norm_Transpose_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_1_norm_Transpose[] = {
    "_decoder_convnext_1_dwconv_net_Conv_output_0"
  };
  uint32_t dimensions__decoder_convnext_1_norm_Transpose_output_0[] = {1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_1_norm_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_1_norm_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0081018367782235f, .offset= -136}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_1_norm_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_1_norm_Transpose", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__decoder_convnext_1_norm_Transpose, // Node Params
    1, // Num Node Params
    inputs__decoder_convnext_1_norm_Transpose, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_1_norm_Transpose, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Static LayerNorm gamma (scale) weights, {512}, u8; payload lives in the
// companion binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ae_decoder_convnext_1_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_1_norm_norm_weight[] = {512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_norm_norm_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ae_decoder_convnext_1_norm_norm_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0037790858186781f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions_tts_ae_decoder_convnext_1_norm_norm_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_norm_norm_weight),
          .dataSize=BINLEN(tts_ae_decoder_convnext_1_norm_norm_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Static LayerNorm beta (shift) weights, {512}, u8, from the binary blob.
static ModelError_t addTensor_tts_ae_decoder_convnext_1_norm_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_1_norm_norm_bias[] = {512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_norm_norm_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ae_decoder_convnext_1_norm_norm_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0025524618104100f, .offset= -152}}},
        .rank= 1,
        .dimensions=dimensions_tts_ae_decoder_convnext_1_norm_norm_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_norm_norm_bias),
          .dataSize=BINLEN(tts_ae_decoder_convnext_1_norm_norm_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// LayerNorm over axis 2 (the 512-channel axis of {1,1152,512}), epsilon 1e-6,
// with the gamma/beta tensors registered above as extra inputs.
static ModelError_t addNode__decoder_convnext_1_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_1_norm_norm_LayerNormalization */
  uint32_t dimensions__decoder_convnext_1_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _decoder_convnext_1_norm_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__decoder_convnext_1_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_1_norm_norm_LayerNormalization_axes",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_1_norm_norm_LayerNormalization_axes,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // Single uint32 axis value = 4 bytes of static client data.
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_norm_norm_LayerNormalization_axes, .dataSize=4}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__decoder_convnext_1_norm_norm_LayerNormalization[] = {
    "_decoder_convnext_1_norm_Transpose_output_0",
    "tts_ae_decoder_convnext_1_norm_norm_weight",
    "tts_ae_decoder_convnext_1_norm_norm_bias"
  };
  uint32_t dimensions__decoder_convnext_1_norm_Transpose_1_output_0[] = {1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_1_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_1_norm_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0252695586532354f, .offset= -123}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_1_norm_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_1_norm_norm_LayerNormalization", // Node Name
    "qti.aisw", // Package Name
    "LayerNorm", // Qnn Node Type
    params__decoder_convnext_1_norm_norm_LayerNormalization, // Node Params
    2, // Num Node Params
    inputs__decoder_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__decoder_convnext_1_norm_norm_LayerNormalization, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}
static ModelError_t addNode__decoder_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__decoder_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_1_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_1_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_1_norm_Transpose_1_output_0" }; uint32_t dimensions__decoder_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_1_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0252695586532354f, .offset= -123}}}, .rank= 3, 
.dimensions=dimensions__decoder_convnext_1_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_pwconv1_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d[] = { "_decoder_convnext_1_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0252695586532354f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_1_pwconv1_Conv_reshape_to_2d" }; uint32_t 
dimensions__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0252695586532354f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_1_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_1_pwconv1_weight[] = {1, 1, 512, 2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_1_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0095272772014141f, .offset= -135}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_1_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_1_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_1_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_1_pwconv1_bias[] = {2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_1_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011574596865103f, .offset= -224}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_1_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_1_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_1_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_pwconv1_Conv_2d */ uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_1_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t 
dimensions__decoder_convnext_1_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_1_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_1_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_1_pwconv1_Conv_2d[] = { "_decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_1_pwconv1_weight", "tts_ae_decoder_convnext_1_pwconv1_bias" }; uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_1_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0478574186563492f, .offset= -151}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= 
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_1_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_1_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_1_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_1_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_1_pwconv1_Conv_intermediate_nchw[] = { 
"_decoder_convnext_1_pwconv1_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_1_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0478574186563492f, .offset= -151}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_1_pwconv1_Conv_intermediate */ const char* inputs__decoder_convnext_1_pwconv1_Conv_intermediate[] = { "_decoder_convnext_1_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_1_pwconv1_Conv_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__decoder_convnext_1_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, 
{.v2= { .id=0, .name= "_decoder_convnext_1_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0478574186563492f, .offset= -151}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_1_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_1_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_2 */ Qnn_Param_t params__elementwiseneuron_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_2[] = { "_decoder_convnext_1_pwconv1_Conv_output_0" }; uint32_t dimensions__decoder_convnext_1_act_Mul_1_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__elementwiseneuron_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
/* NOTE(review): qnn-onnx-converter generated graph-builder code — prefer regenerating over hand-editing. */
/* (continuation) output tensor + node registration for "_elementwiseneuron_2"; the definition starts above this chunk. */
0.0200931727886200f, .offset= -8}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_elementwiseneuron_2", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseNeuron", // Qnn Node Type
  params__elementwiseneuron_2, // Node Params
  1, // Num Node Params
  inputs__elementwiseneuron_2, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__elementwiseneuron_2, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Reshape "_decoder_convnext_1_act_Mul_1_output_0" to rank-4 {1,2048,1,1152} so the pointwise conv
 * downstream can be expressed as Conv2d. Output reuses the producer's uint8 quant encoding
 * (scale 0.0200931..., offset -8), as a Reshape must not requantize. */
static ModelError_t addNode__decoder_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_pwconv2_Conv_reshape_to_2d */
const char* inputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d[] = { "_decoder_convnext_1_act_Mul_1_output_0" };
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152};
Qnn_Tensor_t outputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0200931727886200f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Transpose {1,2048,1,1152} -> {1,1,1152,2048} via perm {0,2,3,1} (NCHW -> NHWC layout for Conv2d).
 * Quant encoding is carried through unchanged. */
static ModelError_t addNode__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
// "perm" is a static rank-1 uint32 tensor of 4 elements (dataSize = 4 * sizeof(uint32_t) = 16).
Qnn_Param_t params__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_1_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048};
Qnn_Tensor_t
/* (continuation) NHWC output tensor {1,1,1152,2048} + Transpose node registration. */
outputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0200931727886200f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Static uint8 weight for the pwconv2 1x1 Conv2d: HWIO {1,1,2048,512}; bytes come from the
 * linked binary blob via BINVARSTART/BINLEN. Per-tensor encoding: scale 0.0116961..., offset -121. */
static ModelError_t addTensor_tts_ae_decoder_convnext_1_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_1_pwconv2_weight[] = {1, 1, 2048, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_pwconv2_weight", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_1_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0116961626335979f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_1_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_1_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
return err;
}

/* Static uint8 bias for pwconv2, rank-1 {512}; data from the binary blob.
 * Encoding: scale 0.0021260..., offset -146. */
static ModelError_t addTensor_tts_ae_decoder_convnext_1_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_1_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_pwconv2_bias", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_1_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021260855719447f, .offset= -146}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_1_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_1_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
return err;
}

/* Conv2d node for pwconv2 (1x1 pointwise conv): stride/dilation {1,1}, zero padding, group=1.
 * Input {1,1,1152,2048} x weight {1,1,2048,512} -> {1,1,1152,512}. */
static ModelError_t addNode__decoder_convnext_1_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_pwconv2_Conv_2d */
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _decoder_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _decoder_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t
/* (continuation of addNode__decoder_convnext_1_pwconv2_Conv_2d) stride {1,1} plus the five Conv2d
 * params: dilation / pad_amount / stride tensors, then scalar group=1 and reuse_sparse_indices=false. */
dimensions__decoder_convnext_1_pwconv2_Conv_2d_stride[] = {2};
uint32_t _decoder_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__decoder_convnext_1_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__decoder_convnext_1_pwconv2_Conv_2d[] = { "_decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_1_pwconv2_weight", "tts_ae_decoder_convnext_1_pwconv2_bias" };
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512};
// Conv output tensor gets a fresh quant encoding (scale 0.0720328..., offset -139).
Qnn_Tensor_t outputs__decoder_convnext_1_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0720328018069267f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_pwconv2_Conv_2d", // Node Name
  "qti.aisw", // Package Name
  "Conv2d", // Qnn Node Type
  params__decoder_convnext_1_pwconv2_Conv_2d, // Node Params
  5, // Num Node Params
  inputs__decoder_convnext_1_pwconv2_Conv_2d, // Input Tensor Names
  3, // Num Input Tensor Names
  outputs__decoder_convnext_1_pwconv2_Conv_2d, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Transpose conv output back NHWC -> NCHW: {1,1,1152,512} -> {1,512,1,1152} via perm {0,3,1,2}. */
static ModelError_t addNode__decoder_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _decoder_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__decoder_convnext_1_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_1_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_1_pwconv2_Conv_intermediate" };
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152};
Qnn_Tensor_t
/* (continuation) NCHW output tensor {1,512,1,1152} + Transpose node registration. */
outputs__decoder_convnext_1_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0720328018069267f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Reshape: drop the dummy H axis, {1,512,1,1152} -> {1,512,1152}, producing the ONNX Conv output
 * "_decoder_convnext_1_pwconv2_Conv_output_0". Same quant encoding as its input. */
static ModelError_t addNode__decoder_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_pwconv2_Conv_intermediate */
const char* inputs__decoder_convnext_1_pwconv2_Conv_intermediate[] = { "_decoder_convnext_1_pwconv2_Conv_intermediate_nchw" };
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_output_0[] = {1, 512, 1152};
Qnn_Tensor_t outputs__decoder_convnext_1_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0720328018069267f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_pwconv2_Conv_intermediate", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__decoder_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_1_pwconv2_Conv_intermediate, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Transpose {1,512,1152} -> {1,1152,512} via perm {0,2,1} (channels-last "nfc" layout for the
 * elementwise ops that follow). perm is 3 x uint32 => dataSize 12. */
static ModelError_t addNode__decoder_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _decoder_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_1_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_1_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_1_pwconv2_Conv_output_0" };
uint32_t dimensions__decoder_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_1_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0720328018069267f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Static uint8 ConvNeXt layer-scale gamma, {1,1,512}; data from the binary blob.
 * Encoding: scale 0.0003534..., offset 0. */
static ModelError_t addTensor_tts_ae_decoder_convnext_1_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_1_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_1_gamma", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003534131392371f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_1_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
return err;
}

/* ElementWiseBinary multiply (operation=13): gamma {1,1,512} broadcast against {1,1152,512}. */
static ModelError_t addNode__decoder_convnext_1_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_Mul */
Qnn_Param_t params__decoder_convnext_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__decoder_convnext_1_Mul[] = { "tts_ae_decoder_convnext_1_gamma", "_decoder_convnext_1_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__decoder_convnext_1_Mul_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033053972292691f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
/* (continuation) tail of _decoder_convnext_1_Mul's output tensor + its node registration. */
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_Mul", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__decoder_convnext_1_Mul, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_1_Mul, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__decoder_convnext_1_Mul, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* ElementWiseBinary add (operation=0): residual connection — block-0 output + this block's
 * scaled branch, both {1,1152,512}. Output encoding: scale 0.0270896..., offset -140. */
static ModelError_t addNode__decoder_convnext_1_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_1_Add */
Qnn_Param_t params__decoder_convnext_1_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__decoder_convnext_1_Add[] = { "_decoder_convnext_0_Add_output_0", "_decoder_convnext_1_Mul_output_0" };
uint32_t dimensions__decoder_convnext_1_Add_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270896498113871f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_1_Add", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__decoder_convnext_1_Add, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_1_Add, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__decoder_convnext_1_Add, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Pad for block-2 depthwise conv: pad_amount {3,2} = {{0,0},{24,0},{0,0}} (24 leading elements on
 * the feature axis, scheme=3), {1,1152,512} -> {1,1176,512}. Quant encoding passes through. */
static ModelError_t addNode__decoder_convnext_2_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_2_dwconv_Pad */
uint32_t dimensions__decoder_convnext_2_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _decoder_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 24, 0, 0, 0};
Qnn_Param_t params__decoder_convnext_2_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__decoder_convnext_2_dwconv_Pad[] = { "_decoder_convnext_1_Add_output_0" };
uint32_t dimensions__decoder_convnext_2_dwconv_Pad_output_0[] = {1, 1176, 512};
Qnn_Tensor_t outputs__decoder_convnext_2_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270896498113871f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_2_dwconv_Pad", // Node Name
  "qti.aisw", // Package Name
  "Pad", // Qnn Node Type
  params__decoder_convnext_2_dwconv_Pad, // Node Params
  2, // Num Node Params
  inputs__decoder_convnext_2_dwconv_Pad, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_2_dwconv_Pad, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Transpose {1,1176,512} -> {1,512,1176} via perm {0,2,1} ("ncf": channels-first for the dwconv). */
static ModelError_t addNode__decoder_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_2_dwconv_Pad_output_0_ncf */
uint32_t dimensions__decoder_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _decoder_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_2_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= {
/* (continuation) tail of the "ncf" perm param, then the transpose's I/O tensors and registration. */
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_2_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_2_dwconv_Pad_output_0" };
uint32_t dimensions__decoder_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 512, 1176};
Qnn_Tensor_t outputs__decoder_convnext_2_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270896498113871f, .offset= -140}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_2_dwconv_Pad_output_0_ncf", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_2_dwconv_Pad_output_0_ncf, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Reshape {1,512,1176} -> rank-4 {1,512,1,1176} so the 1-D depthwise conv can run as Conv2d. */
static ModelError_t addNode__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_2_dwconv_net_Conv_reshape_to_2d */
const char* inputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_2_dwconv_Pad_output_0_ncf" };
uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1176};
Qnn_Tensor_t outputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270896498113871f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Transpose {1,512,1,1176} -> NHWC {1,1,1176,512} via perm {0,2,3,1} for the depthwise Conv2d. */
static ModelError_t addNode__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat=
/* (continuation) tail of the nhwc perm param, then the transpose's I/O tensors and registration. */
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d" };
uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1176, 512};
Qnn_Tensor_t outputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0270896498113871f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}

/* Static uint8 depthwise-conv weight {1,7,1,512} (kernel width 7, one filter per channel);
 * data from the binary blob. Encoding: scale 0.0069635..., offset -129. */
static ModelError_t addTensor_tts_ae_decoder_convnext_2_dwconv_net_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_2_dwconv_net_weight[] = {1, 7, 1, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_dwconv_net_weight", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0069635719992220f, .offset= -129}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_2_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_2_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
return err;
}

/* Static uint8 depthwise-conv bias {512}; data from the binary blob.
 * Encoding: scale 0.0006401..., offset -143. */
static ModelError_t addTensor_tts_ae_decoder_convnext_2_dwconv_net_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_2_dwconv_net_bias[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_dwconv_net_bias", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006401636055671f, .offset= -143}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_2_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_2_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
return err;
}

/* Depthwise Conv2d for block 2: dilation {1,4} over a width-7 kernel (effective extent 25), so the
 * padded width 1176 yields 1176 - 25 + 1 = 1152 outputs. Definition continues past this chunk. */
static ModelError_t addNode__decoder_convnext_2_dwconv_net_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_2_dwconv_net_Conv_2d */
uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_2d_dilation[] = {2};
uint32_t _decoder_convnext_2_dwconv_net_Conv_2d_dilation[] = {1, 4};
uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_2d_pad_amount[] = {2, 2};
uint32_t _decoder_convnext_2_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_2d_stride[] = {2};
uint32_t _decoder_convnext_2_dwconv_net_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__decoder_convnext_2_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_dwconv_net_Conv_2d[] = { "_decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_2_dwconv_net_weight", "tts_ae_decoder_convnext_2_dwconv_net_bias" }; uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t 
outputs__decoder_convnext_2_dwconv_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052337730303407f, .offset= -108}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_dwconv_net_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__decoder_convnext_2_dwconv_net_Conv_2d, // Node Params 3, // Num Node Params inputs__decoder_convnext_2_dwconv_net_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_2_dwconv_net_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_dwconv_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_2_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_2_dwconv_net_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052337730303407f, .offset= -108}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_dwconv_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num 
Input Tensor Names outputs__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_dwconv_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_dwconv_net_Conv_intermediate */ const char* inputs__decoder_convnext_2_dwconv_net_Conv_intermediate[] = { "_decoder_convnext_2_dwconv_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_2_dwconv_net_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_dwconv_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052337730303407f, .offset= -108}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_dwconv_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_2_dwconv_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_dwconv_net_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_norm_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_norm_Transpose */ uint32_t 
dimensions__decoder_convnext_2_norm_Transpose_perm[] = {3}; uint32_t _decoder_convnext_2_norm_Transpose_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_2_norm_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_norm_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_norm_Transpose[] = { "_decoder_convnext_2_dwconv_net_Conv_output_0" }; uint32_t dimensions__decoder_convnext_2_norm_Transpose_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_2_norm_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052337730303407f, .offset= -108}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_decoder_convnext_2_norm_Transpose", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_norm_Transpose, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_norm_Transpose, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_norm_Transpose, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_2_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0031944427173585f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_norm_norm_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_2_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043592117726803f, .offset= -115}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_norm_norm_LayerNormalization */ uint32_t dimensions__decoder_convnext_2_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _decoder_convnext_2_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__decoder_convnext_2_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 
0.000001000000f}}}} }; const char* inputs__decoder_convnext_2_norm_norm_LayerNormalization[] = { "_decoder_convnext_2_norm_Transpose_output_0", "tts_ae_decoder_convnext_2_norm_norm_weight", "tts_ae_decoder_convnext_2_norm_norm_bias" }; uint32_t dimensions__decoder_convnext_2_norm_Transpose_1_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_2_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0212977286428213f, .offset= -126}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__decoder_convnext_2_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__decoder_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_2_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__decoder_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; 
Qnn_Param_t params__decoder_convnext_2_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_2_norm_Transpose_1_output_0" }; uint32_t dimensions__decoder_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0212977286428213f, .offset= -126}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_decoder_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv1_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d[] = { "_decoder_convnext_2_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0212977286428213f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_2_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0212977286428213f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_2_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_2_pwconv1_weight[] = {1, 1, 512, 2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083414623513818f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_2_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_2_pwconv1_bias[] = {2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019212916959077f, .offset= -224}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv1_Conv_2d */ uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_2_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_2_pwconv1_Conv_2d[] = { "_decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_2_pwconv1_weight", "tts_ae_decoder_convnext_2_pwconv1_bias" }; uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0531625971198082f, .offset= -192}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_2_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_2_pwconv1_Conv_2d, // Input 
/* Tail of the pwconv1 Conv2d node registration (3 inputs: activation, weight, bias),
 * followed by addNode__decoder_convnext_2_pwconv1_Conv_intermediate_nchw: a Transpose
 * node with perm {0,3,1,2} (NHWC -> NCHW) mapping the conv output {1,1,1152,2048} to
 * {1,2048,1,1152}. The perm tensor is a static rank-1 UINT_32 tensor (4 elems, 16 bytes);
 * the transpose output keeps the producer's uint8 scale/offset quantization unchanged
 * (scale 0.05316..., offset -192), as layout permutation does not alter values. */
Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_2_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_pwconv1_Conv_intermediate_nchw[] = { "_decoder_convnext_2_pwconv1_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0531625971198082f, .offset= -192}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv1_Conv_intermediate */ const char* inputs__decoder_convnext_2_pwconv1_Conv_intermediate[] = { "_decoder_convnext_2_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_2_pwconv1_Conv_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0531625971198082f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, 
/* Continuation: finishes the Reshape node that drops the singleton H axis
 * ({1,2048,1,1152} -> {1,2048,1152}) producing "_decoder_convnext_2_pwconv1_Conv_output_0".
 * Then addNode__elementwiseneuron_4: an ElementWiseNeuron activation with scalar
 * param operation=1 (backend-defined neuron selector from QnnOpDef -- presumably the
 * converter's lowering of the ConvNeXt GELU activation given the surrounding "_act_Mul"
 * tensor names; confirm against QNN_OP_ELEMENT_WISE_NEURON_OPERATION_* values).
 * Output is re-quantized (scale 0.01388..., offset -12) since the activation changes
 * the value range. Finally, addNode__decoder_convnext_2_pwconv2_Conv_reshape_to_2d
 * re-inserts the singleton axis ({1,2048,1152} -> {1,2048,1,1152}) so pwconv2 can run
 * as a 2d convolution; quantization params are carried through unchanged. */
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_4 */ Qnn_Param_t params__elementwiseneuron_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_4[] = { "_decoder_convnext_2_pwconv1_Conv_output_0" }; uint32_t dimensions__decoder_convnext_2_act_Mul_1_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__elementwiseneuron_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0138792395591736f, .offset= -12}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_4", // Node Name "qti.aisw", // 
Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_4, // Node Params 1, // Num Node Params inputs__elementwiseneuron_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv2_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d[] = { "_decoder_convnext_2_act_Mul_1_output_0" }; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0138792395591736f, .offset= -12}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* addNode__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc: Transpose with
 * perm {0,2,3,1} (NCHW -> NHWC), {1,2048,1,1152} -> {1,1,1152,2048}, putting the
 * activation into the channel-last layout the Conv2d op consumes; quantization is
 * carried through unchanged. Then addTensor_tts_ae_decoder_convnext_2_pwconv2_weight:
 * static uint8 scale-offset-quantized weight tensor {1,1,2048,512}, i.e. a 1x1 kernel
 * with 2048 input / 512 output channels (the ConvNeXt block's second pointwise
 * projection); weight bytes come from the linked binary blob via BINVARSTART/BINLEN. */
addNode__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_2_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0138792395591736f, .offset= -12}}}, .rank= 4, 
.dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_2_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_2_pwconv2_weight[] = {1, 1, 2048, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120781557634473f, .offset= -141}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
/* addTensor_tts_ae_decoder_convnext_2_pwconv2_bias: static rank-1 bias tensor {512},
 * uint8 scale-offset quantized, data from the binary blob. Then
 * addNode__decoder_convnext_2_pwconv2_Conv_2d: the Conv2d node itself -- tensor params
 * dilation {1,1}, pad_amount {0,0,0,0} (2x2), stride {1,1}, plus scalar params group=1
 * and reuse_sparse_indices=false; with the 1x1 weight this is a plain pointwise
 * (2048 -> 512 channel) projection. Inputs: NHWC activation, weight, bias; output
 * "_decoder_convnext_2_pwconv2_Conv_intermediate" {1,1,1152,512}, freshly quantized
 * (scale 0.04002..., offset -130) since the conv changes the value range. */
addTensor_tts_ae_decoder_convnext_2_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_2_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015181814087555f, .offset= -74}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_2_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_2_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv2_Conv_2d */ uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_2_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_2_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_2_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_2_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_2_pwconv2_Conv_2d[] = { "_decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_2_pwconv2_weight", "tts_ae_decoder_convnext_2_pwconv2_bias" }; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0400269180536270f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_2_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_2_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* Undo the 2d-conv layout plumbing after pwconv2:
 * addNode__decoder_convnext_2_pwconv2_Conv_intermediate_nchw transposes the conv
 * output with perm {0,3,1,2} (NHWC -> NCHW), {1,1,1152,512} -> {1,512,1,1152};
 * addNode__decoder_convnext_2_pwconv2_Conv_intermediate then reshapes away the
 * singleton axis to the rank-3 ONNX-facing tensor
 * "_decoder_convnext_2_pwconv2_Conv_output_0" {1,512,1152}. Both are value-preserving,
 * so the conv's quantization (scale 0.04002..., offset -130) is carried through. */
addNode__decoder_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_2_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_2_pwconv2_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0400269180536270f, .offset= -130}}}, .rank= 4, 
.dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv2_Conv_intermediate */ const char* inputs__decoder_convnext_2_pwconv2_Conv_intermediate[] = { "_decoder_convnext_2_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0400269180536270f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, 
/* Finishes the Reshape node registration, then:
 * addNode__decoder_convnext_2_pwconv2_Conv_output_0_nfc -- Transpose perm {0,2,1},
 * {1,512,1152} -> {1,1152,512}, moving channels last so gamma can broadcast;
 * addTensor_tts_ae_decoder_convnext_2_gamma -- static {1,1,512} per-channel scale
 * tensor (ConvNeXt layer-scale gamma), uint8 quantized with offset 0, bytes from the
 * binary blob; addNode__decoder_convnext_2_Mul -- ElementWiseBinary with operation=13
 * (multiply, per the node name "_decoder_convnext_2_Mul"; numeric ids map to
 * QNN_OP_ELEMENT_WISE_BINARY_OPERATION_* -- confirm against QnnOpDef.h). gamma {1,1,512}
 * broadcasts over the activation {1,1152,512}; output re-quantized
 * (scale 0.00438..., offset -77). */
.isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_2_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_2_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_2_pwconv2_Conv_output_0" }; uint32_t dimensions__decoder_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_2_pwconv2_Conv_output_0_nfc[] 
= { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0400269180536270f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_2_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_2_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_2_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_2_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007479430059902f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_2_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(tts_ae_decoder_convnext_2_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_2_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_2_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_Mul */ Qnn_Param_t params__decoder_convnext_2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__decoder_convnext_2_Mul[] = { "tts_ae_decoder_convnext_2_gamma", "_decoder_convnext_2_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__decoder_convnext_2_Mul_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043869148939848f, .offset= -77}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_2_Mul, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_2_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
/* addNode__decoder_convnext_2_Add: ElementWiseBinary with operation=0 (add, per the
 * node name) -- the ConvNeXt block's residual connection, summing the block input
 * "_decoder_convnext_1_Add_output_0" with the gamma-scaled branch output; result
 * {1,1152,512} re-quantized (scale 0.02712..., offset -139).
 * Then addNode__decoder_convnext_3_dwconv_Pad: Pad for the next block's depthwise
 * conv. pad_amount is a {3,2} tensor {0,0, 6,0, 0,0}: 6 leading elements on axis 1
 * only, which matches output dims {1,1158,512} vs input {1,1152,512} (causal-style
 * left padding for the 7-tap dwconv that follows). scheme=3 is a QNN pad-scheme enum
 * value -- confirm its meaning against QNN_OP_PAD_SCHEME_* in QnnOpDef.h. Padding is
 * value-preserving for existing elements, so quantization params pass through.
 * The last function begun here (the _ncf Transpose) continues past this span. */
ModelError_t addNode__decoder_convnext_2_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_2_Add */ Qnn_Param_t params__decoder_convnext_2_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__decoder_convnext_2_Add[] = { "_decoder_convnext_1_Add_output_0", "_decoder_convnext_2_Mul_output_0" }; uint32_t dimensions__decoder_convnext_2_Add_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_2_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_2_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0271226577460766f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_2_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_2_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_2_Add, // Node Params 1, // Num Node Params inputs__decoder_convnext_2_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_2_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_dwconv_Pad */ uint32_t dimensions__decoder_convnext_3_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _decoder_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 6, 0, 0, 0}; 
Qnn_Param_t params__decoder_convnext_3_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_3_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__decoder_convnext_3_dwconv_Pad[] = { "_decoder_convnext_2_Add_output_0" }; uint32_t dimensions__decoder_convnext_3_dwconv_Pad_output_0[] = {1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_3_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0271226577460766f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_3_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_decoder_convnext_3_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__decoder_convnext_3_dwconv_Pad, // Node Params 2, // Num Node Params inputs__decoder_convnext_3_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_dwconv_Pad_output_0_ncf */ uint32_t dimensions__decoder_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_3_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_3_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_3_dwconv_Pad_output_0" }; uint32_t dimensions__decoder_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 512, 1158}; Qnn_Tensor_t outputs__decoder_convnext_3_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_Pad_output_0_ncf", .type= 
/* Continuation of the _ncf Transpose (perm {0,2,1}: padded {1,1158,512} ->
 * channel-first {1,512,1158}), then the standard 1d-conv-as-2d plumbing for the
 * convnext_3 depthwise conv: a Reshape inserting a singleton H axis
 * ({1,512,1158} -> {1,512,1,1158}) and a Transpose perm {0,2,3,1} (NCHW -> NHWC,
 * -> {1,1,1158,512}). All three are value-preserving, so the Pad output's
 * quantization (scale 0.02712..., offset -139) is carried through unchanged.
 * The final addTensor for the depthwise weight {1,7,1,512} (7-tap kernel over the
 * 1158-long axis, one filter per channel) begins here but continues past this chunk. */
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0271226577460766f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_3_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_3_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_3_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_dwconv_net_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_3_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1158}; Qnn_Tensor_t outputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0271226577460766f, .offset= -139}}}, .rank= 4, 
.dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0271226577460766f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_3_dwconv_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_3_dwconv_net_weight[] = {1, 7, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_dwconv_net_weight", // Tensor Name 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_3_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0054915645159781f, .offset= -133}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_3_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_3_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_3_dwconv_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_3_dwconv_net_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_dwconv_net_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_3_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009515552665107f, .offset= -153}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_3_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_3_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
addNode__decoder_convnext_3_dwconv_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_dwconv_net_Conv_2d */ uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_3_dwconv_net_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_3_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_3_dwconv_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_3_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, 
.dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_3_dwconv_net_Conv_2d[] = { "_decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_3_dwconv_net_weight", "tts_ae_decoder_convnext_3_dwconv_net_bias" }; uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_3_dwconv_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079700071364641f, .offset= -133}}}, .rank= 4, 
.dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_dwconv_net_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__decoder_convnext_3_dwconv_net_Conv_2d, // Node Params 3, // Num Node Params inputs__decoder_convnext_3_dwconv_net_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_3_dwconv_net_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_dwconv_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_3_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_3_dwconv_net_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079700071364641f, .offset= -133}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_dwconv_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_dwconv_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_dwconv_net_Conv_intermediate */ const char* inputs__decoder_convnext_3_dwconv_net_Conv_intermediate[] = { 
"_decoder_convnext_3_dwconv_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_3_dwconv_net_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_3_dwconv_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079700071364641f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_3_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_dwconv_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_3_dwconv_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_dwconv_net_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_norm_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_norm_Transpose */ uint32_t dimensions__decoder_convnext_3_norm_Transpose_perm[] = {3}; uint32_t _decoder_convnext_3_norm_Transpose_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_3_norm_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_norm_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_3_norm_Transpose[] = { "_decoder_convnext_3_dwconv_net_Conv_output_0" }; uint32_t dimensions__decoder_convnext_3_norm_Transpose_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_3_norm_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079700071364641f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_3_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_norm_Transpose", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_3_norm_Transpose, // Node Params 1, // Num Node Params inputs__decoder_convnext_3_norm_Transpose, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_norm_Transpose, // Output Tensors 1// Num Output Tensors ), err); return err; } 
static ModelError_t addTensor_tts_ae_decoder_convnext_3_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_3_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030340084340423f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_3_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_norm_norm_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_3_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_3_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030495193786919f, .offset= -111}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_norm_norm_bias), 
.dataSize=BINLEN(tts_ae_decoder_convnext_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_3_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_norm_norm_LayerNormalization */ uint32_t dimensions__decoder_convnext_3_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _decoder_convnext_3_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__decoder_convnext_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__decoder_convnext_3_norm_norm_LayerNormalization[] = { "_decoder_convnext_3_norm_Transpose_output_0", "tts_ae_decoder_convnext_3_norm_norm_weight", "tts_ae_decoder_convnext_3_norm_norm_bias" }; uint32_t dimensions__decoder_convnext_3_norm_Transpose_1_output_0[] = {1, 1152, 512}; Qnn_Tensor_t 
outputs__decoder_convnext_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0379491746425629f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__decoder_convnext_3_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__decoder_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_3_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__decoder_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_3_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_3_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_3_norm_Transpose_1_output_0" }; uint32_t dimensions__decoder_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_3_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0379491746425629f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_3_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__decoder_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_pwconv1_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d[] = { "_decoder_convnext_3_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0379491746425629f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t 
dimensions__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_3_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0379491746425629f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  // Transpose node: permutes the rank-4 reshaped activation into NHWC so the
  // following "Conv2d" node can consume it.
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Registers the static weight tensor for decoder ConvNeXt block 3 "pwconv1":
// a 1x1 kernel with 512 in / 2048 out channels, stored as asymmetric
// per-tensor-quantized uint8. Raw bytes come from the linked binary blob
// via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ae_decoder_convnext_3_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_3_pwconv1_weight[] = {1, 1, 512, 2048};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_pwconv1_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ae_decoder_convnext_3_pwconv1_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0093550579622388f, .offset= -123}}},
                                 .rank= 4,
                                 .dimensions=dimensions_tts_ae_decoder_convnext_3_pwconv1_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_pwconv1_weight),
                                                .dataSize=BINLEN(tts_ae_decoder_convnext_3_pwconv1_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Registers the static bias tensor (2048 elements, quantized uint8) for the
// same pwconv1 conv. NOTE(review): offset -210 is outside the usual
// [-255, 0] uint8 zero-point range seen elsewhere in this file — emitted
// as-is by the converter; do not hand-edit.
static ModelError_t addTensor_tts_ae_decoder_convnext_3_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_3_pwconv1_bias[] = {2048};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_pwconv1_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ae_decoder_convnext_3_pwconv1_bias",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0017764862859622f, .offset= -210}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ae_decoder_convnext_3_pwconv1_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_pwconv1_bias),
                                                .dataSize=BINLEN(tts_ae_decoder_convnext_3_pwconv1_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Adds the pwconv1 Conv2d node: the ONNX 1-D pointwise Conv lowered to a 2-D
// conv (1x1 kernel, stride 1x1, zero padding, dilation 1x1, group=1) over the
// NHWC-reshaped activation produced by the Transpose above.
static ModelError_t addNode__decoder_convnext_3_pwconv1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv1_Conv_2d */
  uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_2d_dilation[] = {2};
  uint32_t _decoder_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_2d_stride[] = {2};
  uint32_t _decoder_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__decoder_convnext_3_pwconv1_Conv_2d[] = {
    // Tensor params hold their payload in clientBuf (dataSize in bytes:
    // 2 x uint32 = 8, 2x2 uint32 = 16).
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv1_Conv_2d_dilation",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_2d_dilation,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv1_Conv_2d_dilation,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv1_Conv_2d_pad_amount",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 2,
           .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_2d_pad_amount,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv1_Conv_2d_pad_amount,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv1_Conv_2d_stride",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_2d_stride,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv1_Conv_2d_stride,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Inputs: NHWC activation, then static weight and bias registered above.
  const char* inputs__decoder_convnext_3_pwconv1_Conv_2d[] = {
    "_decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc",
    "tts_ae_decoder_convnext_3_pwconv1_weight",
    "tts_ae_decoder_convnext_3_pwconv1_bias"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv1_Conv_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv1_Conv_intermediate",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.1036707013845444f, .offset= -151}}},
          .rank= 4,
          .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_intermediate,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv1_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__decoder_convnext_3_pwconv1_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__decoder_convnext_3_pwconv1_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv1_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose node: permutes the Conv2d result (1,1,1152,2048 NHWC) back to
// NCHW order (1,2048,1,1152) with perm {0,3,1,2}. Quantization params are
// copied from the input tensor (transpose is layout-only).
static ModelError_t addNode__decoder_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv1_Conv_intermediate_nchw */
  uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _decoder_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__decoder_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv1_Conv_intermediate_nchw_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_intermediate_nchw_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv1_Conv_intermediate_nchw_perm,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    "_decoder_convnext_3_pwconv1_Conv_intermediate"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv1_Conv_intermediate_nchw",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.1036707013845444f, .offset= -151}}},
          .rank= 4,
          .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_intermediate_nchw,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape node: drops the singleton H axis, (1,2048,1,1152) -> (1,2048,1152),
// restoring the original rank-3 ONNX Conv output layout. Reshape takes no
// params (nullptr / 0 below).
static ModelError_t addNode__decoder_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv1_Conv_intermediate */
  const char* inputs__decoder_convnext_3_pwconv1_Conv_intermediate[] = {
    "_decoder_convnext_3_pwconv1_Conv_intermediate_nchw"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv1_Conv_output_0[] = {1, 2048, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv1_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv1_Conv_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.1036707013845444f, .offset= -151}}},
          .rank= 3,
          .dimensions=dimensions__decoder_convnext_3_pwconv1_Conv_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv1_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__decoder_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv1_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseNeuron node: activation applied to the pwconv1 output.
// NOTE(review): operation code 1 is an enum from QnnOpDef.h — the output
// tensor feeds "_decoder_convnext_3_act_Mul_1_output_0", which suggests the
// ConvNeXt GELU activation; confirm the code against
// QNN_OP_ELEMENT_WISE_NEURON_OPERATION_* before relying on it.
static ModelError_t addNode__elementwiseneuron_6(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _elementwiseneuron_6 */
  Qnn_Param_t params__elementwiseneuron_6[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__elementwiseneuron_6[] = {
    "_decoder_convnext_3_pwconv1_Conv_output_0"
  };
  uint32_t dimensions__decoder_convnext_3_act_Mul_1_output_0[] = {1, 2048, 1152};
  Qnn_Tensor_t outputs__elementwiseneuron_6[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_act_Mul_1_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0430368147790432f, .offset= -4}}},
          .rank= 3,
          .dimensions=dimensions__decoder_convnext_3_act_Mul_1_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_elementwiseneuron_6", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseNeuron", // Qnn Node Type
                         params__elementwiseneuron_6, // Node Params
                         1, // Num Node Params
                         inputs__elementwiseneuron_6, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__elementwiseneuron_6, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

static ModelError_t
// Reshape node: inserts a singleton H axis, (1,2048,1152) -> (1,2048,1,1152),
// so the pwconv2 1-D conv can be lowered to a 2-D conv (same pattern as
// pwconv1 above).
addNode__decoder_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv2_Conv_reshape_to_2d */
  const char* inputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d[] = {
    "_decoder_convnext_3_act_Mul_1_output_0"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv2_Conv_reshape_to_2d",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0430368147790432f, .offset= -4}}},
          .rank= 4,
          .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_reshape_to_2d,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose node: NCHW -> NHWC (perm {0,2,3,1}) ahead of the pwconv2 Conv2d.
static ModelError_t addNode__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    "_decoder_convnext_3_pwconv2_Conv_reshape_to_2d"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0430368147790432f, .offset= -4}}},
          .rank= 4,
          .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Registers the static weight tensor for pwconv2: 1x1 kernel, 2048 in / 512
// out channels, asymmetric per-tensor uint8; bytes from the binary blob.
static ModelError_t addTensor_tts_ae_decoder_convnext_3_pwconv2_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_3_pwconv2_weight[] = {1, 1, 2048, 512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_pwconv2_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ae_decoder_convnext_3_pwconv2_weight",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0116108441725373f, .offset= -123}}},
                                 .rank= 4,
                                 .dimensions=dimensions_tts_ae_decoder_convnext_3_pwconv2_weight,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_pwconv2_weight),
                                                .dataSize=BINLEN(tts_ae_decoder_convnext_3_pwconv2_weight)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Registers the static bias tensor (512 elements, quantized uint8) for pwconv2.
static ModelError_t addTensor_tts_ae_decoder_convnext_3_pwconv2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_3_pwconv2_bias[] = {512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_pwconv2_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ae_decoder_convnext_3_pwconv2_bias",
                                 .type=
QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0016988650895655f, .offset= -63}}},
                                 .rank= 1,
                                 .dimensions=dimensions_tts_ae_decoder_convnext_3_pwconv2_bias,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_pwconv2_bias),
                                                .dataSize=BINLEN(tts_ae_decoder_convnext_3_pwconv2_bias)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// Adds the pwconv2 Conv2d node: 1x1 kernel, stride 1x1, zero padding,
// dilation 1x1, group=1 — the projection back from 2048 to 512 channels.
static ModelError_t addNode__decoder_convnext_3_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv2_Conv_2d */
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _decoder_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _decoder_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__decoder_convnext_3_pwconv2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv2_Conv_2d_dilation",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_2d_dilation,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv2_Conv_2d_dilation,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv2_Conv_2d_pad_amount",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 2,
           .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_2d_pad_amount,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv2_Conv_2d_pad_amount,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv2_Conv_2d_stride",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_2d_stride,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv2_Conv_2d_stride,
                          .dataSize=8}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam=
(Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Inputs: NHWC activation plus the static pwconv2 weight and bias above.
  const char* inputs__decoder_convnext_3_pwconv2_Conv_2d[] = {
    "_decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc",
    "tts_ae_decoder_convnext_3_pwconv2_weight",
    "tts_ae_decoder_convnext_3_pwconv2_bias"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv2_Conv_intermediate",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.1435610949993134f, .offset= -116}}},
          .rank= 4,
          .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_intermediate,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv2_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__decoder_convnext_3_pwconv2_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__decoder_convnext_3_pwconv2_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv2_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose node: pwconv2 result NHWC (1,1,1152,512) back to NCHW
// (1,512,1,1152) with perm {0,3,1,2}.
static ModelError_t addNode__decoder_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv2_Conv_intermediate_nchw */
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _decoder_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t
params__decoder_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv2_Conv_intermediate_nchw_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_intermediate_nchw_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv2_Conv_intermediate_nchw_perm,
                          .dataSize=16}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
    "_decoder_convnext_3_pwconv2_Conv_intermediate"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv2_Conv_intermediate_nchw",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.1435610949993134f, .offset= -116}}},
          .rank= 4,
          .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_intermediate_nchw,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape node: drops the singleton H axis, (1,512,1,1152) -> (1,512,1152),
// yielding the rank-3 pwconv2 output. Reshape takes no params.
static ModelError_t addNode__decoder_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv2_Conv_intermediate */
  const char* inputs__decoder_convnext_3_pwconv2_Conv_intermediate[] = {
    "_decoder_convnext_3_pwconv2_Conv_intermediate_nchw"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_output_0[] = {1, 512, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv2_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv2_Conv_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.1435610949993134f, .offset= -116}}},
          .rank= 3,
          .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv2_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__decoder_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
outputs__decoder_convnext_3_pwconv2_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose node: (1,512,1152) -> (1,1152,512) with perm {0,2,1} (channels
// last), the layout expected by the per-channel gamma multiply that follows.
// perm payload is 3 x uint32 = 12 bytes.
static ModelError_t addNode__decoder_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_pwconv2_Conv_output_0_nfc */
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _decoder_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__decoder_convnext_3_pwconv2_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_3_pwconv2_Conv_output_0_nfc_perm",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                              QNN_QUANTIZATION_ENCODING_UNDEFINED,
                              {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
           .rank= 1,
           .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_output_0_nfc_perm,
           .memType= QNN_TENSORMEMTYPE_RAW,
           {.clientBuf= { .data=(uint8_t*)_decoder_convnext_3_pwconv2_Conv_output_0_nfc_perm,
                          .dataSize=12}},
           .isDynamicDimensions= nullptr,
           .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
           .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_3_pwconv2_Conv_output_0_nfc[] = {
    "_decoder_convnext_3_pwconv2_Conv_output_0"
  };
  uint32_t dimensions__decoder_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_3_pwconv2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_pwconv2_Conv_output_0_nfc",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale=
0.1435610949993134f, .offset= -116}}},
          .rank= 3,
          .dimensions=dimensions__decoder_convnext_3_pwconv2_Conv_output_0_nfc,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Registers the static ConvNeXt block-3 layer-scale "gamma" tensor
// (1,1,512) quantized uint8 (offset 0); bytes from the binary blob.
static ModelError_t addTensor_tts_ae_decoder_convnext_3_gamma(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_3_gamma[] = {1, 1, 512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_3_gamma", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "tts_ae_decoder_convnext_3_gamma",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0018470111535862f, .offset= 0}}},
                                 .rank= 3,
                                 .dimensions=dimensions_tts_ae_decoder_convnext_3_gamma,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_3_gamma),
                                                .dataSize=BINLEN(tts_ae_decoder_convnext_3_gamma)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

// ElementWiseBinary node: gamma (1,1,512) broadcast-combined with the
// pwconv2 output (1,1152,512). Node name says Mul; operation code 13 is the
// QnnOpDef.h ElementWiseBinary enum value — NOTE(review): confirm it maps to
// MULTIPLY in this SDK version before relying on it.
static ModelError_t addNode__decoder_convnext_3_Mul(QnnModel& model){
  ModelError_t err =
MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_Mul */
  Qnn_Param_t params__decoder_convnext_3_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__decoder_convnext_3_Mul[] = {
    "tts_ae_decoder_convnext_3_gamma",
    "_decoder_convnext_3_pwconv2_Conv_output_0_nfc"
  };
  uint32_t dimensions__decoder_convnext_3_Mul_output_0[] = {1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_3_Mul[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_Mul_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0182019546627998f, .offset= -123}}},
          .rank= 3,
          .dimensions=dimensions__decoder_convnext_3_Mul_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__decoder_convnext_3_Mul, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_3_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__decoder_convnext_3_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary node: residual combine of the block-2 output with the
// scaled block-3 branch. Node name says Add; operation code 0 is the
// QnnOpDef.h enum value — NOTE(review): confirm it maps to ADD in this SDK
// version.
static ModelError_t addNode__decoder_convnext_3_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_3_Add */
  Qnn_Param_t params__decoder_convnext_3_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__decoder_convnext_3_Add[] = {
    "_decoder_convnext_2_Add_output_0",
    "_decoder_convnext_3_Mul_output_0"
  };
  uint32_t dimensions__decoder_convnext_3_Add_output_0[] = {1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_3_Add[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_convnext_3_Add_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0242276769131422f, .offset= -147}}},
          .rank= 3,
          .dimensions=dimensions__decoder_convnext_3_Add_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_3_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__decoder_convnext_3_Add, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_3_Add, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__decoder_convnext_3_Add, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Pad node feeding the block-4 depthwise conv: pads only axis 1 (the 1152
// frame axis) with 12 leading elements, (1,1152,512) -> (1,1164,512).
// pad_amount is a 3x2 (before, after) table = {{0,0},{12,0},{0,0}},
// 24 bytes of uint32. NOTE(review): scheme code 3 is a QNN_OP_PAD_SCHEME_*
// enum value — confirm its meaning (constant/edge/reflect) in QnnOpDef.h.
static ModelError_t addNode__decoder_convnext_4_dwconv_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_4_dwconv_Pad */
  uint32_t dimensions__decoder_convnext_4_dwconv_Pad_pad_amount[] = {3, 2};
  uint32_t _decoder_convnext_4_dwconv_Pad_pad_amount[] = {0, 0, 12, 0, 0, 0};
  Qnn_Param_t params__decoder_convnext_4_dwconv_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_2,
         {.v2= {
           .id=0,
           .name= "_decoder_convnext_4_dwconv_Pad_pad_amount",
           .type= QNN_TENSOR_TYPE_STATIC,
           .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
           .dataType= QNN_DATATYPE_UINT_32,
           .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_4_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__decoder_convnext_4_dwconv_Pad[] = { "_decoder_convnext_3_Add_output_0" }; uint32_t dimensions__decoder_convnext_4_dwconv_Pad_output_0[] = {1, 1164, 512}; Qnn_Tensor_t outputs__decoder_convnext_4_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0242276769131422f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__decoder_convnext_4_dwconv_Pad, // Node Params 2, // Num Node Params inputs__decoder_convnext_4_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_4_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__decoder_convnext_4_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_dwconv_Pad_output_0_ncf */ uint32_t dimensions__decoder_convnext_4_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_4_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_4_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_4_dwconv_Pad_output_0" }; uint32_t dimensions__decoder_convnext_4_dwconv_Pad_output_0_ncf[] = {1, 512, 1164}; Qnn_Tensor_t outputs__decoder_convnext_4_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0242276769131422f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_4_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers Reshape "_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d": inserts a unit height
   axis, [1,512,1164] -> [1,512,1,1164], so the original 1-D conv can be lowered to a 2-D conv. */
static ModelError_t addNode__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_dwconv_net_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_4_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1164}; Qnn_Tensor_t outputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0242276769131422f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers Transpose {0,2,3,1}: NCHW -> NHWC, [1,512,1,1164] -> [1,1,1164,512], the layout
   DepthWiseConv2d expects. */
static ModelError_t addNode__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1164, 512}; Qnn_Tensor_t outputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0242276769131422f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers the static depthwise-conv weight tensor: u8 [1, 7, 1, 512] (HWIO-style, kernel 1x7,
   512 channels); payload comes from the external binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ae_decoder_convnext_4_dwconv_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_dwconv_net_weight[] = {1, 7, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_dwconv_net_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale=
0.0063276514410973f, .offset= -117}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_4_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_4_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Registers the static depthwise-conv bias tensor: u8 [512], payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_4_dwconv_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_dwconv_net_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_dwconv_net_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008792460430413f, .offset= -92}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_4_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_4_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Registers DepthWiseConv2d "_decoder_convnext_4_dwconv_net_Conv_2d": NHWC input [1,1,1164,512],
   1x7 kernel, dilation {1,2} (effective span 13), stride {1,1}, zero pad (the preceding Pad node
   already supplied the 12-sample left pad): 1164 - 13 + 1 = 1152 output width -> [1,1,1152,512]. */
static ModelError_t addNode__decoder_convnext_4_dwconv_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_dwconv_net_Conv_2d */ uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_4_dwconv_net_Conv_2d_dilation[] = {1, 2}; uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_4_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_4_dwconv_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_4_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_dwconv_net_Conv_2d[] = { "_decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_4_dwconv_net_weight", "tts_ae_decoder_convnext_4_dwconv_net_bias" }; uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_4_dwconv_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066890860907733f, .offset= -109}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_dwconv_net_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__decoder_convnext_4_dwconv_net_Conv_2d, // Node Params
3, // Num Node Params
inputs__decoder_convnext_4_dwconv_net_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_4_dwconv_net_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Next: Transpose {0,3,1,2} NHWC -> NCHW on the conv output, [1,1,1152,512] -> [1,512,1,1152]. */
static ModelError_t addNode__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_dwconv_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_4_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_4_dwconv_net_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
.name= "_decoder_convnext_4_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066890860907733f, .offset= -109}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_dwconv_net_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers Reshape "_decoder_convnext_4_dwconv_net_Conv_intermediate": drops the unit height
   axis again, [1,512,1,1152] -> [1,512,1152], undoing the 1-D -> 2-D conv lowering. */
static ModelError_t addNode__decoder_convnext_4_dwconv_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_dwconv_net_Conv_intermediate */ const char* inputs__decoder_convnext_4_dwconv_net_Conv_intermediate[] = { "_decoder_convnext_4_dwconv_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_4_dwconv_net_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_dwconv_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066890860907733f, .offset= -109}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_dwconv_net_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_4_dwconv_net_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_dwconv_net_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers Transpose {0,2,1} "_decoder_convnext_4_norm_Transpose": [1,512,1152] -> [1,1152,512]
   so the feature (512) axis is last for the LayerNorm that follows. */
static ModelError_t addNode__decoder_convnext_4_norm_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_norm_Transpose */ uint32_t dimensions__decoder_convnext_4_norm_Transpose_perm[] = {3}; uint32_t _decoder_convnext_4_norm_Transpose_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_4_norm_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_norm_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_norm_Transpose[] = { "_decoder_convnext_4_dwconv_net_Conv_output_0" }; uint32_t dimensions__decoder_convnext_4_norm_Transpose_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_4_norm_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066890860907733f, .offset= -109}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_norm_Transpose", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_4_norm_Transpose, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_norm_Transpose, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_norm_Transpose, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers the static LayerNorm scale (gamma) tensor: u8 [512], payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_4_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0051333094015718f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_4_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_norm_norm_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_4_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Registers the static LayerNorm bias (beta) tensor: u8 [512], payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_4_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023312498815358f, .offset= -109}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_4_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_norm_norm_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_4_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Registers LayerNorm "_decoder_convnext_4_norm_norm_LayerNormalization": normalizes over
   axis 2 (the 512-feature axis, now last after the preceding transpose), epsilon = 1e-6, with
   gamma/beta from the two static tensors above. Output: u8 [1, 1152, 512]. */
static ModelError_t addNode__decoder_convnext_4_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_norm_norm_LayerNormalization */ uint32_t dimensions__decoder_convnext_4_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _decoder_convnext_4_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__decoder_convnext_4_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__decoder_convnext_4_norm_norm_LayerNormalization[] = { "_decoder_convnext_4_norm_Transpose_output_0", "tts_ae_decoder_convnext_4_norm_norm_weight", "tts_ae_decoder_convnext_4_norm_norm_bias" }; uint32_t dimensions__decoder_convnext_4_norm_Transpose_1_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_4_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0311640165746212f, .offset= -117}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__decoder_convnext_4_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__decoder_convnext_4_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_4_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers Transpose {0,2,1} back to channel-first: [1,1152,512] -> [1,512,1152] after the
   LayerNorm. */
static ModelError_t addNode__decoder_convnext_4_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__decoder_convnext_4_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_4_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_4_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_4_norm_Transpose_1_output_0" }; uint32_t dimensions__decoder_convnext_4_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0311640165746212f, .offset= -117}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_4_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers Reshape "_decoder_convnext_4_pwconv1_Conv_reshape_to_2d": inserts a unit height axis,
   [1,512,1152] -> [1,512,1,1152], lowering the pointwise 1-D conv to 2-D. */
static ModelError_t addNode__decoder_convnext_4_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv1_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d[] = { "_decoder_convnext_4_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152}; Qnn_Tensor_t
outputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0311640165746212f, .offset= -117}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers Transpose {0,2,3,1}: NCHW -> NHWC, [1,512,1,1152] -> [1,1,1152,512], ahead of the
   pointwise (1x1) conv. */
static ModelError_t addNode__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_4_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0311640165746212f, .offset= -117}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers the static pwconv1 weight tensor: u8 [1, 1, 512, 2048] (1x1 kernel, 512 in -> 2048
   out, the ConvNeXt 4x feature expansion); payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_4_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_pwconv1_weight[] = {1, 1, 512, 2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0110574550926685f, .offset= -137}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_4_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_4_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Registers the static pwconv1 bias tensor: u8 [2048], payload from the binary blob
   (definition continues past this chunk). */
static ModelError_t addTensor_tts_ae_decoder_convnext_4_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_pwconv1_bias[] = {2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012379029067233f, .offset= -216}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_4_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_4_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv1_Conv_2d */ uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_4_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_4_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_4_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_4_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_2d_pad_amount", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_4_pwconv1_Conv_2d[] = { "_decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_4_pwconv1_weight", "tts_ae_decoder_convnext_4_pwconv1_bias" }; uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048}; Qnn_Tensor_t 
outputs__decoder_convnext_4_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0710149407386780f, .offset= -179}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_4_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_4_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_4_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_4_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_4_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_pwconv1_Conv_intermediate_nchw[] = { "_decoder_convnext_4_pwconv1_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0710149407386780f, .offset= -179}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_4_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_4_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__decoder_convnext_4_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv1_Conv_intermediate */ const char* inputs__decoder_convnext_4_pwconv1_Conv_intermediate[] = { "_decoder_convnext_4_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_4_pwconv1_Conv_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0710149407386780f, .offset= -179}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_4_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_4_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_8 */ Qnn_Param_t params__elementwiseneuron_8[] = { {.paramType=QNN_PARAMTYPE_SCALAR, 
.name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_8[] = { "_decoder_convnext_4_pwconv1_Conv_output_0" }; uint32_t dimensions__decoder_convnext_4_act_Mul_1_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__elementwiseneuron_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219253785908222f, .offset= -8}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_8", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_8, // Node Params 1, // Num Node Params inputs__elementwiseneuron_8, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_8, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv2_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d[] = { "_decoder_convnext_4_act_Mul_1_output_0" }; uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_convnext_4_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219253785908222f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 
0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_4_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219253785908222f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addTensor_tts_ae_decoder_convnext_4_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_pwconv2_weight[] = {1, 1, 2048, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0129605978727341f, .offset= -103}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_4_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_4_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_4_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_4_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019027900416404f, .offset= -58}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_4_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_4_pwconv2_bias)}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv2_Conv_2d */ uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_4_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_4_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_4_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_4_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_4_pwconv2_Conv_2d[] = { "_decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_4_pwconv2_weight", "tts_ae_decoder_convnext_4_pwconv2_bias" }; uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_convnext_4_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0440519712865353f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_4_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_4_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_4_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_4_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_4_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, 
.rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_4_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_4_pwconv2_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0440519712865353f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_4_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_4_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_4_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__decoder_convnext_4_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv2_Conv_intermediate */ const char* inputs__decoder_convnext_4_pwconv2_Conv_intermediate[] = { "_decoder_convnext_4_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_4_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0440519712865353f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_4_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_4_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_4_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_4_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_4_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_convnext_4_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t 
params__decoder_convnext_4_pwconv2_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_4_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
/* NOTE(review): continuation of addNode__decoder_convnext_4_pwconv2_Conv_output_0_nfc (function header is above this chunk); registers a Transpose node producing a {1,1152,512} uint8 tensor. */
const char* inputs__decoder_convnext_4_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_4_pwconv2_Conv_output_0" };
uint32_t dimensions__decoder_convnext_4_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_4_pwconv2_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0440519712865353f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_4_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_4_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Registers static tensor tts_ae_decoder_convnext_4_gamma: ConvNeXt block-4 per-channel scale, shape {1,1,512}, uint8 scale/offset quantized; payload comes from the linked binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ae_decoder_convnext_4_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_4_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_4_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_4_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017980288248509f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_4_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_4_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_4_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* ElementWiseBinary node _decoder_convnext_4_Mul: gamma * pwconv2 output; operation=13 is presumably the MULTIPLY value of the QNN ElementWiseBinary operation enum (node name says Mul) -- TODO confirm against QnnOpDef.h. */
static ModelError_t addNode__decoder_convnext_4_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_4_Mul */
Qnn_Param_t params__decoder_convnext_4_Mul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__decoder_convnext_4_Mul[] = { "tts_ae_decoder_convnext_4_gamma", "_decoder_convnext_4_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__decoder_convnext_4_Mul_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_4_Mul[] =
{
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0084969419986010f, .offset= -97}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__decoder_convnext_4_Mul, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__decoder_convnext_4_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary node _decoder_convnext_4_Add: residual connection (block-3 output + scaled block-4 branch); operation=0 is presumably the ADD enum value -- TODO confirm against QnnOpDef.h. */
static ModelError_t addNode__decoder_convnext_4_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_4_Add */
Qnn_Param_t params__decoder_convnext_4_Add[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__decoder_convnext_4_Add[] = { "_decoder_convnext_3_Add_output_0", "_decoder_convnext_4_Mul_output_0" };
uint32_t dimensions__decoder_convnext_4_Add_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_4_Add[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_4_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0243241861462593f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_4_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_4_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__decoder_convnext_4_Add, // Node Params
1, // Num Node Params
inputs__decoder_convnext_4_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__decoder_convnext_4_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Pad node for the block-5 depthwise conv: pad_amount is a {3,2} row-per-axis {before,after} table = {{0,0},{24,0},{0,0}}, i.e. 24 frames prepended on axis 1 (1152 -> 1176); scheme=3 selects a QNN pad scheme (value meaning defined in QnnOpDef.h -- presumably edge/replicate, confirm). */
static ModelError_t addNode__decoder_convnext_5_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_dwconv_Pad */
uint32_t dimensions__decoder_convnext_5_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _decoder_convnext_5_dwconv_Pad_pad_amount[] = {0, 0, 24, 0, 0, 0};
Qnn_Param_t params__decoder_convnext_5_dwconv_Pad[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_5_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__decoder_convnext_5_dwconv_Pad[] = { "_decoder_convnext_4_Add_output_0" };
uint32_t dimensions__decoder_convnext_5_dwconv_Pad_output_0[] = {1, 1176, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_dwconv_Pad[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0243241861462593f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__decoder_convnext_5_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__decoder_convnext_5_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose {0,2,1}: (1,1176,512) -> (1,512,1176), feature-last to channel-first ahead of the depthwise-conv lowering. */
static ModelError_t addNode__decoder_convnext_5_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_dwconv_Pad_output_0_ncf */
uint32_t dimensions__decoder_convnext_5_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _decoder_convnext_5_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_5_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_5_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_5_dwconv_Pad_output_0" };
uint32_t dimensions__decoder_convnext_5_dwconv_Pad_output_0_ncf[] = {1, 512, 1176};
Qnn_Tensor_t outputs__decoder_convnext_5_dwconv_Pad_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0243241861462593f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_5_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__decoder_convnext_5_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape (1,512,1176) -> (1,512,1,1176): inserts a height-1 axis so the 1-D depthwise conv can be executed as DepthWiseConv2d. */
static ModelError_t addNode__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_dwconv_net_Conv_reshape_to_2d */
const char* inputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_5_dwconv_Pad_output_0_ncf" };
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1176};
Qnn_Tensor_t outputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0243241861462593f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose {0,2,3,1}: NCHW -> NHWC, the layout the DepthWiseConv2d node consumes. */
static ModelError_t addNode__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d" };
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1176, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0243241861462593f, .offset= -147}}}, .rank= 4,
.dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static depthwise filter for block-5: shape {1,7,1,512} (7-tap kernel, one filter per channel -- exact axis layout per QNN DepthWiseConv2d filter convention, TODO confirm), uint8 scale/offset quantized, payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_dwconv_net_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_5_dwconv_net_weight[] = {1, 7, 1, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_dwconv_net_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047507355920970f, .offset= -134}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_5_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_5_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Static per-channel bias {512} for the block-5 depthwise conv. NOTE(review): bias is 8-bit quantized (bias_bitwidth=8 in the conversion command line); precision is limited accordingly. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_dwconv_net_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_5_dwconv_net_bias[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_dwconv_net_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012152857379988f, .offset= -110}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_5_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_5_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* DepthWiseConv2d for block-5: 7-tap kernel with dilation {1,4} (effective span 1 + 6*4 = 25, so 1176 padded frames -> 1152 outputs), stride {1,1}, zero conv padding (padding was applied by the Pad node above). */
static ModelError_t addNode__decoder_convnext_5_dwconv_net_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_dwconv_net_Conv_2d */
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_2d_dilation[] = {2};
uint32_t _decoder_convnext_5_dwconv_net_Conv_2d_dilation[] = {1, 4};
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_2d_pad_amount[] = {2, 2};
uint32_t _decoder_convnext_5_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_2d_stride[] = {2};
uint32_t _decoder_convnext_5_dwconv_net_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__decoder_convnext_5_dwconv_net_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_2d_dilation", .type=
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_5_dwconv_net_Conv_2d[] = { "_decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_5_dwconv_net_weight", "tts_ae_decoder_convnext_5_dwconv_net_bias" };
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_dwconv_net_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077802943997085f, .offset= -136}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_dwconv_net_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__decoder_convnext_5_dwconv_net_Conv_2d, // Node Params
3, // Num Node Params
inputs__decoder_convnext_5_dwconv_net_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_5_dwconv_net_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose {0,3,1,2}: NHWC -> NCHW after the depthwise conv. */
static ModelError_t addNode__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_dwconv_net_Conv_intermediate_nchw */
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw_perm[] = {4};
uint32_t _decoder_convnext_5_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_5_dwconv_net_Conv_intermediate" };
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152};
Qnn_Tensor_t outputs__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077802943997085f, .offset= -136}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_dwconv_net_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape (1,512,1,1152) -> (1,512,1152): drops the helper height axis, restoring the 1-D conv output tensor. */
static ModelError_t addNode__decoder_convnext_5_dwconv_net_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_dwconv_net_Conv_intermediate */
const char* inputs__decoder_convnext_5_dwconv_net_Conv_intermediate[] = { "_decoder_convnext_5_dwconv_net_Conv_intermediate_nchw" };
uint32_t dimensions__decoder_convnext_5_dwconv_net_Conv_output_0[] = {1, 512, 1152};
Qnn_Tensor_t outputs__decoder_convnext_5_dwconv_net_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077802943997085f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_dwconv_net_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_5_dwconv_net_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_dwconv_net_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose {0,2,1}: (1,512,1152) -> (1,1152,512) so LayerNorm can normalize over the trailing 512-channel axis. */
static ModelError_t addNode__decoder_convnext_5_norm_Transpose(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_norm_Transpose */
uint32_t dimensions__decoder_convnext_5_norm_Transpose_perm[] = {3};
uint32_t _decoder_convnext_5_norm_Transpose_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_5_norm_Transpose[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_norm_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_5_norm_Transpose[] = { "_decoder_convnext_5_dwconv_net_Conv_output_0" };
uint32_t dimensions__decoder_convnext_5_norm_Transpose_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_norm_Transpose[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077802943997085f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_norm_Transpose", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_5_norm_Transpose, // Node Params
1, // Num Node Params
inputs__decoder_convnext_5_norm_Transpose, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_norm_Transpose, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static LayerNorm scale (weight) tensor {512} for the block-5 norm, uint8 scale/offset quantized, payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_5_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033553175162524f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_5_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_norm_norm_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_5_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Static LayerNorm bias tensor {512} for the block-5 norm, uint8 scale/offset quantized, payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_5_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025873223785311f, .offset= -119}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_5_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_norm_norm_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_5_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* LayerNorm over axis 2 (the 512-channel axis of the {1,1152,512} input), epsilon 1e-6, using the weight/bias tensors above; the output keeps the ONNX "_decoder_convnext_5_norm_Transpose_1_output_0" name. */
static ModelError_t addNode__decoder_convnext_5_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_norm_norm_LayerNormalization */
uint32_t dimensions__decoder_convnext_5_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _decoder_convnext_5_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__decoder_convnext_5_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__decoder_convnext_5_norm_norm_LayerNormalization[] = { "_decoder_convnext_5_norm_Transpose_output_0", "tts_ae_decoder_convnext_5_norm_norm_weight", "tts_ae_decoder_convnext_5_norm_norm_bias" };
uint32_t dimensions__decoder_convnext_5_norm_Transpose_1_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0357526391744614f, .offset= -154}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__decoder_convnext_5_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__decoder_convnext_5_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_5_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose {0,2,1}: (1,1152,512) -> (1,512,1152), back to channel-first for the following pointwise conv. */
static ModelError_t addNode__decoder_convnext_5_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__decoder_convnext_5_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _decoder_convnext_5_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_5_norm_Transpose_1_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_5_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_5_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_5_norm_Transpose_1_output_0" };
uint32_t dimensions__decoder_convnext_5_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152};
Qnn_Tensor_t outputs__decoder_convnext_5_norm_Transpose_1_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        /* Quantization is unchanged by the transpose (same scale/offset as its input). */
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0357526391744614f, .offset= -154}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_5_norm_Transpose_1_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_norm_Transpose_1_output_0_ncf", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__decoder_convnext_5_norm_Transpose_1_output_0_ncf, // Node Params
    1, // Num Node Params
    inputs__decoder_convnext_5_norm_Transpose_1_output_0_ncf, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_5_norm_Transpose_1_output_0_ncf, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Reshape inserting a singleton spatial axis, [1,512,1152] ->
 * [1,512,1,1152], so the 1-D pointwise conv can run as a QNN Conv2d. */
static ModelError_t addNode__decoder_convnext_5_pwconv1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv1_Conv_reshape_to_2d */
  const char* inputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d[] = {
    "_decoder_convnext_5_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0357526391744614f, .offset= -154}}},
        .rank= 4,
.dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv1_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Transpose (perm {0,2,3,1}) converting the reshaped activation
 * from NCHW [1,512,1,1152] to the NHWC layout QNN Conv2d consumes. */
static ModelError_t addNode__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    "_decoder_convnext_5_pwconv1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0357526391744614f, .offset= -154}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Registers the first pointwise-conv (512 -> 2048 channels) weight as a
 * STATIC uint8 tensor of shape [1,1,512,2048] (HWIO layout for Conv2d),
 * backed by the BINVARSTART/BINLEN weight blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_5_pwconv1_weight[] = {1, 1, 512, 2048};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_pwconv1_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name=
"tts_ae_decoder_convnext_5_pwconv1_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0138931749388576f, .offset= -111}}},
        .rank= 4,
        .dimensions=dimensions_tts_ae_decoder_convnext_5_pwconv1_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_5_pwconv1_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the first pointwise-conv bias as a STATIC uint8 tensor of
 * shape [2048], backed by the BINVARSTART/BINLEN weight blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_5_pwconv1_bias[] = {2048};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_pwconv1_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_pwconv1_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021083673927933f, .offset= -189}}},
        .rank= 1,
        .dimensions=dimensions_tts_ae_decoder_convnext_5_pwconv1_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_5_pwconv1_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds the first pointwise convolution (1x1 Conv2d, 512 -> 2048 channels,
 * stride 1, no padding, group 1) of decoder ConvNeXt block 5. */
static ModelError_t addNode__decoder_convnext_5_pwconv1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv1_Conv_2d */
uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_2d_dilation[] = {2};
  uint32_t _decoder_convnext_5_pwconv1_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_convnext_5_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_2d_stride[] = {2};
  uint32_t _decoder_convnext_5_pwconv1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__decoder_convnext_5_pwconv1_Conv_2d[] = {
    /* dilation = {1,1}: no dilation. */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv1_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    /* pad_amount = all zeros: a 1x1 kernel needs no padding. */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv1_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    /* stride = {1,1}. */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv1_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__decoder_convnext_5_pwconv1_Conv_2d[] = {
    "_decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc",
    "tts_ae_decoder_convnext_5_pwconv1_weight",
    "tts_ae_decoder_convnext_5_pwconv1_bias"
  };
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv1_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0781335607171059f, .offset= -188}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv1_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "Conv2d", // Qnn Node Type
    params__decoder_convnext_5_pwconv1_Conv_2d, // Node Params
    5, // Num Node Params
    inputs__decoder_convnext_5_pwconv1_Conv_2d, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv1_Conv_2d, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Transpose (perm {0,3,1,2}) taking the Conv2d output back from
 * NHWC [1,1,1152,2048] to NCHW [1,2048,1,1152]. */
static ModelError_t addNode__decoder_convnext_5_pwconv1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv1_Conv_intermediate_nchw */
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _decoder_convnext_5_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__decoder_convnext_5_pwconv1_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_5_pwconv1_Conv_intermediate_nchw[] = {
    "_decoder_convnext_5_pwconv1_Conv_intermediate"
  };
  uint32_t dimensions__decoder_convnext_5_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0781335607171059f, .offset= -188}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv1_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__decoder_convnext_5_pwconv1_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__decoder_convnext_5_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv1_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Reshape dropping the singleton spatial axis, [1,2048,1,1152] ->
 * [1,2048,1152], restoring the original 3-D (rank-3) activation shape. */
static ModelError_t addNode__decoder_convnext_5_pwconv1_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv1_Conv_intermediate */
  const char* inputs__decoder_convnext_5_pwconv1_Conv_intermediate[] = {
    "_decoder_convnext_5_pwconv1_Conv_intermediate_nchw"
  };
  uint32_t
dimensions__decoder_convnext_5_pwconv1_Conv_output_0[] = {1, 2048, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv1_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv1_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0781335607171059f, .offset= -188}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_5_pwconv1_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv1_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__decoder_convnext_5_pwconv1_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv1_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds an ElementWiseNeuron activation on the pwconv1 output (operation=1;
 * the original graph node name "_decoder_convnext_5_act_Mul_1_output_0"
 * suggests this realizes the ConvNeXt GELU activation -- confirm against
 * the backend's ElementWiseNeuron operation enum). */
static ModelError_t addNode__elementwiseneuron_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _elementwiseneuron_10 */
  Qnn_Param_t params__elementwiseneuron_10[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__elementwiseneuron_10[] = {
    "_decoder_convnext_5_pwconv1_Conv_output_0"
  };
  uint32_t dimensions__decoder_convnext_5_act_Mul_1_output_0[] = {1, 2048, 1152};
  Qnn_Tensor_t outputs__elementwiseneuron_10[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_act_Mul_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0212299972772598f, .offset= -8}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_5_act_Mul_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_elementwiseneuron_10", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseNeuron", // Qnn Node Type
    params__elementwiseneuron_10, // Node Params
    1, // Num Node Params
    inputs__elementwiseneuron_10, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__elementwiseneuron_10, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Reshape inserting a singleton spatial axis, [1,2048,1152] ->
 * [1,2048,1,1152], so the second pointwise conv can run as a QNN Conv2d. */
static ModelError_t addNode__decoder_convnext_5_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv2_Conv_reshape_to_2d */
  const char* inputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d[] = {
    "_decoder_convnext_5_act_Mul_1_output_0"
  };
  uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0212299972772598f, .offset= -8}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv2_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Transpose (perm {0,2,3,1}) converting the reshaped activation
 * from NCHW [1,2048,1,1152] to the NHWC layout QNN Conv2d consumes. */
static ModelError_t addNode__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] =
{ "_decoder_convnext_5_pwconv2_Conv_reshape_to_2d" };
  uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0212299972772598f, .offset= -8}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Registers the second pointwise-conv (2048 -> 512 channels) weight as a
 * STATIC uint8 tensor of shape [1,1,2048,512] (HWIO layout for Conv2d),
 * backed by the BINVARSTART/BINLEN weight blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_pwconv2_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_5_pwconv2_weight[] = {1, 1, 2048, 512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_pwconv2_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_pwconv2_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133028617128730f, .offset= -134}}},
        .rank= 4,
        .dimensions=dimensions_tts_ae_decoder_convnext_5_pwconv2_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_5_pwconv2_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the second pointwise-conv bias as a STATIC uint8 tensor of
 * shape [512], backed by the BINVARSTART/BINLEN weight blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_5_pwconv2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_5_pwconv2_bias[] = {512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_pwconv2_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_pwconv2_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027160537429154f, .offset= -101}}},
        .rank= 1,
        .dimensions=dimensions_tts_ae_decoder_convnext_5_pwconv2_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_5_pwconv2_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds the second pointwise convolution (1x1 Conv2d, 2048 -> 512 channels,
 * stride 1, no padding, group 1) of decoder ConvNeXt block 5. */
static ModelError_t addNode__decoder_convnext_5_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv2_Conv_2d */
  uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _decoder_convnext_5_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t
dimensions__decoder_convnext_5_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_convnext_5_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _decoder_convnext_5_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__decoder_convnext_5_pwconv2_Conv_2d[] = {
    /* dilation = {1,1}: no dilation. */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv2_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    /* pad_amount = all zeros: a 1x1 kernel needs no padding. */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv2_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    /* stride = {1,1}. */
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv2_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__decoder_convnext_5_pwconv2_Conv_2d[] = {
    "_decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc",
    "tts_ae_decoder_convnext_5_pwconv2_weight",
    "tts_ae_decoder_convnext_5_pwconv2_bias"
  };
  uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_5_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
      {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0419837944209576f, .offset= -120}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_convnext_5_pwconv2_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "Conv2d", // Qnn Node Type
    params__decoder_convnext_5_pwconv2_Conv_2d, // Node Params
    5, // Num Node Params
    inputs__decoder_convnext_5_pwconv2_Conv_2d, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__decoder_convnext_5_pwconv2_Conv_2d, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Transpose (perm {0,3,1,2}) taking the Conv2d output back from
 * NHWC [1,1,1152,512] to NCHW [1,512,1,1152]. */
static ModelError_t addNode__decoder_convnext_5_pwconv2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_convnext_5_pwconv2_Conv_intermediate_nchw */
  uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _decoder_convnext_5_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__decoder_convnext_5_pwconv2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
       {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_5_pwconv2_Conv_intermediate_nchw[] = {
"_decoder_convnext_5_pwconv2_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_5_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0419837944209576f, .offset= -120}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_5_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_5_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_5_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_5_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_5_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_5_pwconv2_Conv_intermediate */ const char* inputs__decoder_convnext_5_pwconv2_Conv_intermediate[] = { "_decoder_convnext_5_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_5_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= 
{ .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0419837944209576f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_5_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_5_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_5_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_5_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_5_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_convnext_5_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_5_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
// (continuation of the "perm" param tensor for _decoder_convnext_5_pwconv2_Conv_output_0_nfc)
.dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_5_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_5_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_5_pwconv2_Conv_output_0" };
uint32_t dimensions__decoder_convnext_5_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0419837944209576f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_5_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__decoder_convnext_5_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_5_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Static per-channel scale "gamma" of ConvNeXt block 5 (1,1,512); quantized uint8 weights embedded
// in the binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ae_decoder_convnext_5_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t
dimensions_tts_ae_decoder_convnext_5_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_5_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_5_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025209623854607f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_5_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_5_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_5_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// ElementWiseBinary with operation=13 — multiply, per the node name "_Mul" (enum value defined in
// QnnOpDef.h; TODO confirm). gamma broadcasts (1,1,512) against the activation (1,1152,512).
static ModelError_t addNode__decoder_convnext_5_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_Mul */
Qnn_Param_t params__decoder_convnext_5_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__decoder_convnext_5_Mul[] = { "tts_ae_decoder_convnext_5_gamma", "_decoder_convnext_5_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__decoder_convnext_5_Mul_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0172711480408907f, .offset= -144}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__decoder_convnext_5_Mul, // Node Params
1, // Num Node Params
inputs__decoder_convnext_5_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__decoder_convnext_5_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Residual add of ConvNeXt block 5: operation=0 — add, per the node name "_Add" (TODO confirm enum).
// Adds the block-4 output to the gamma-scaled branch.
static ModelError_t addNode__decoder_convnext_5_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_5_Add */
Qnn_Param_t params__decoder_convnext_5_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__decoder_convnext_5_Add[] = { "_decoder_convnext_4_Add_output_0", "_decoder_convnext_5_Mul_output_0" };
uint32_t dimensions__decoder_convnext_5_Add_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_5_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_5_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244567580521107f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_5_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_5_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__decoder_convnext_5_Add, // Node Params
1, // Num Node Params
inputs__decoder_convnext_5_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__decoder_convnext_5_Add, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Pad for the block-6 depthwise conv: pad_amount is (rank,2)={3,2} pairs {0,0, 6,0, 0,0}, i.e. 6
// elements prepended on axis 1 (1152 -> 1158); scheme=3 selects the pad mode (enum in QnnOpDef.h —
// TODO confirm which scheme value 3 denotes).
static ModelError_t addNode__decoder_convnext_6_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_6_dwconv_Pad */
uint32_t dimensions__decoder_convnext_6_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _decoder_convnext_6_dwconv_Pad_pad_amount[] = {0, 0, 6, 0, 0, 0};
Qnn_Param_t params__decoder_convnext_6_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_6_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__decoder_convnext_6_dwconv_Pad[] = { "_decoder_convnext_5_Add_output_0" };
uint32_t dimensions__decoder_convnext_6_dwconv_Pad_output_0[] = {1, 1158, 512};
Qnn_Tensor_t outputs__decoder_convnext_6_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
// (continuation of the Pad output tensor: same quant encoding as its input — Pad is value-preserving)
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244567580521107f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_6_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_6_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__decoder_convnext_6_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__decoder_convnext_6_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_6_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Transpose (1,1158,512) -> (1,512,1158): back to channels-first "NCF" ahead of the depthwise conv;
// perm {0,2,1}, 3 uint32 => dataSize 12.
static ModelError_t addNode__decoder_convnext_6_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_6_dwconv_Pad_output_0_ncf */
uint32_t dimensions__decoder_convnext_6_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _decoder_convnext_6_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_6_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_6_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=(uint8_t*)_decoder_convnext_6_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_6_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_6_dwconv_Pad_output_0" };
uint32_t dimensions__decoder_convnext_6_dwconv_Pad_output_0_ncf[] = {1, 512, 1158};
Qnn_Tensor_t outputs__decoder_convnext_6_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244567580521107f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_6_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_6_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_6_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__decoder_convnext_6_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_6_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Insert a dummy height axis so the 1-D depthwise conv can run as 2-D: (1,512,1158) -> (1,512,1,1158).
static ModelError_t addNode__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_6_dwconv_net_Conv_reshape_to_2d */
const char* inputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d[] = {
"_decoder_convnext_6_dwconv_Pad_output_0_ncf" };
uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1158};
Qnn_Tensor_t outputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244567580521107f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Transpose NCHW (1,512,1,1158) -> NHWC (1,1,1158,512) for the conv kernel; perm {0,2,3,1},
// 4 uint32 => dataSize 16.
static ModelError_t addNode__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
.id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d" };
uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1158, 512};
Qnn_Tensor_t outputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244567580521107f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
// (continuation of the Transpose addNode call for _decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc)
params__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Depthwise conv weight (1,7,1,512): 1x7 kernel, 512 channels; uint8 quantized blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_dwconv_net_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_6_dwconv_net_weight[] = {1, 7, 1, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_dwconv_net_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_6_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050785318017006f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_6_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_6_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// Depthwise conv bias (512), uint8 quantized.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_dwconv_net_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_6_dwconv_net_bias[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_dwconv_net_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_6_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021466796752065f, .offset= -179}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_6_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_6_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// DepthWiseConv2d for ConvNeXt block 6: dilation {1,1}, pad {0,0,0,0} (padding already applied by the
// explicit Pad node), stride {1,1}; 1x7 kernel over width 1158 -> output width 1152.
static ModelError_t addNode__decoder_convnext_6_dwconv_net_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_6_dwconv_net_Conv_2d */
uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_2d_dilation[] = {2};
uint32_t _decoder_convnext_6_dwconv_net_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_2d_pad_amount[] = {2, 2};
uint32_t _decoder_convnext_6_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_2d_stride[] = {2};
uint32_t _decoder_convnext_6_dwconv_net_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__decoder_convnext_6_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_6_dwconv_net_Conv_2d[] = { "_decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_6_dwconv_net_weight", "tts_ae_decoder_convnext_6_dwconv_net_bias" };
uint32_t
dimensions__decoder_convnext_6_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_6_dwconv_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0086057027801871f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_6_dwconv_net_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__decoder_convnext_6_dwconv_net_Conv_2d, // Node Params
3, // Num Node Params
inputs__decoder_convnext_6_dwconv_net_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_6_dwconv_net_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Transpose the depthwise-conv result from NHWC (1,1,1152,512) back to NCHW (1,512,1,1152);
// perm {0,3,1,2}, 4 uint32 => dataSize 16.
static ModelError_t addNode__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_6_dwconv_net_Conv_intermediate_nchw */
uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw_perm[] = {4};
uint32_t _decoder_convnext_6_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_decoder_convnext_6_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_6_dwconv_net_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0086057027801871f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_6_dwconv_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
// --- continuation of addNode__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw ---
// (the VALIDATE(model.addNode(...)) call opens before this chunk; trailing arguments verbatim)
params__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Reshape node that collapses the NCHW 4-D intermediate
// "_decoder_convnext_6_dwconv_net_Conv_intermediate_nchw" into the rank-3 tensor
// "_decoder_convnext_6_dwconv_net_Conv_output_0" {1, 512, 1152}
// (uint8, scale 0.0086057027801871, zero-point offset -121).
// Returns MODEL_NO_ERROR on success; VALIDATE propagates any backend error via `err`.
static ModelError_t addNode__decoder_convnext_6_dwconv_net_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_dwconv_net_Conv_intermediate */
  const char* inputs__decoder_convnext_6_dwconv_net_Conv_intermediate[] = {
    "_decoder_convnext_6_dwconv_net_Conv_intermediate_nchw"
  };
  uint32_t dimensions__decoder_convnext_6_dwconv_net_Conv_output_0[] = {1, 512, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_6_dwconv_net_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_dwconv_net_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,  // produced and consumed inside the graph
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0086057027801871f, .offset= -121}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_6_dwconv_net_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},  // backend allocates; no client buffer
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_dwconv_net_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__decoder_convnext_6_dwconv_net_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_dwconv_net_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Adds a Transpose node with perm {0, 2, 1}: NCF {1, 512, 1152} -> NFC {1, 1152, 512}
// so that the following LayerNorm can normalize over the channel (last) axis.
// Quantization of the output is identical to its input (pure data movement).
static ModelError_t addNode__decoder_convnext_6_norm_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_norm_Transpose */
  uint32_t dimensions__decoder_convnext_6_norm_Transpose_perm[] = {3};
  uint32_t _decoder_convnext_6_norm_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__decoder_convnext_6_norm_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_norm_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_norm_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_norm_Transpose_perm, .dataSize=12}},  // 3 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_6_norm_Transpose[] = {
    "_decoder_convnext_6_dwconv_net_Conv_output_0"
  };
  uint32_t dimensions__decoder_convnext_6_norm_Transpose_output_0[] = {1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_6_norm_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_norm_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0086057027801871f, .offset= -121}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_6_norm_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions=
nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_norm_Transpose", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_6_norm_Transpose, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_6_norm_Transpose, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_norm_Transpose, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Registers the static LayerNorm gamma tensor {512} (uint8, scale 0.0039900871925056,
// offset 0); the raw bytes come from the linked weight binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_6_norm_norm_weight[] = {512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_norm_norm_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ae_decoder_convnext_6_norm_norm_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0039900871925056f, .offset= 0}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ae_decoder_convnext_6_norm_norm_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_norm_norm_weight),
                                              .dataSize=BINLEN(tts_ae_decoder_convnext_6_norm_norm_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
  return err;
}

// Registers the static LayerNorm beta (bias) tensor {512}; body continues in the next chunk.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_norm_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_6_norm_norm_bias[] = {512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_norm_norm_bias", // Tensor Name
                           // --- continuation of addTensor_tts_ae_decoder_convnext_6_norm_norm_bias ---
                           // Static beta tensor {512}, uint8, scale 0.0030426005832851, offset -118;
                           // bytes bound from the weight binary via BINVARSTART/BINLEN.
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ae_decoder_convnext_6_norm_norm_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0030426005832851f, .offset= -118}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ae_decoder_convnext_6_norm_norm_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_norm_norm_bias),
                                              .dataSize=BINLEN(tts_ae_decoder_convnext_6_norm_norm_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
return err;
}

// Adds a LayerNorm node over axis 2 (the 512-wide channel axis of the NFC layout),
// epsilon 1e-6, with gamma/beta supplied as the two static tensors registered above.
// Output "_decoder_convnext_6_norm_Transpose_1_output_0" {1, 1152, 512}
// (uint8, scale 0.0313151702284813, offset -119).
static ModelError_t addNode__decoder_convnext_6_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_norm_norm_LayerNormalization */
  uint32_t dimensions__decoder_convnext_6_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _decoder_convnext_6_norm_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__decoder_convnext_6_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_norm_norm_LayerNormalization_axes",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_norm_norm_LayerNormalization_axes,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_norm_norm_LayerNormalization_axes, .dataSize=4}},  // 1 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  // Inputs: activation, gamma, beta (in that order).
  const char* inputs__decoder_convnext_6_norm_norm_LayerNormalization[] = {
    "_decoder_convnext_6_norm_Transpose_output_0",
    "tts_ae_decoder_convnext_6_norm_norm_weight",
    "tts_ae_decoder_convnext_6_norm_norm_bias"
  };
  uint32_t dimensions__decoder_convnext_6_norm_Transpose_1_output_0[] = {1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_6_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_norm_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0313151702284813f, .offset= -119}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_6_norm_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_norm_norm_LayerNormalization", // Node Name
                         "qti.aisw", // Package Name
                         "LayerNorm", // Qnn Node Type
                         params__decoder_convnext_6_norm_norm_LayerNormalization, // Node Params
                         2, // Num Node Params
                         inputs__decoder_convnext_6_norm_norm_LayerNormalization, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__decoder_convnext_6_norm_norm_LayerNormalization, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Adds a Transpose with perm {0, 2, 1}: NFC {1, 1152, 512} back to NCF {1, 512, 1152}
// after the LayerNorm; continues in the next chunk.
static ModelError_t addNode__decoder_convnext_6_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__decoder_convnext_6_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _decoder_convnext_6_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__decoder_convnext_6_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_norm_Transpose_1_output_0_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_norm_Transpose_1_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}},  // 3 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_6_norm_Transpose_1_output_0_ncf[] = {
    "_decoder_convnext_6_norm_Transpose_1_output_0"
  };
  uint32_t dimensions__decoder_convnext_6_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_6_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_norm_Transpose_1_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0313151702284813f, .offset= -119}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_6_norm_Transpose_1_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions=
        // --- continuation of addNode__decoder_convnext_6_norm_Transpose_1_output_0_ncf ---
        nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_norm_Transpose_1_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_6_norm_Transpose_1_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_6_norm_Transpose_1_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_norm_Transpose_1_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
                         ), err);
return err;
}

// Adds a Reshape that expands the rank-3 NCF tensor {1, 512, 1152} to a 4-D
// NCHW tensor {1, 512, 1, 1152} so the pointwise conv can run as a Conv2d.
// Quantization carried over unchanged from the input.
static ModelError_t addNode__decoder_convnext_6_pwconv1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv1_Conv_reshape_to_2d */
  const char* inputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d[] = {
    "_decoder_convnext_6_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_pwconv1_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0313151702284813f, .offset= -119}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv1_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Adds a Transpose with perm {0, 2, 3, 1}: NCHW {1, 512, 1, 1152} -> NHWC
// {1, 1, 1152, 512}, the layout the Conv2d op consumes.
static ModelError_t addNode__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},  // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    "_decoder_convnext_6_pwconv1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0313151702284813f, .offset= -119}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Registers the static 1x1 pwconv1 weight tensor {1, 1, 512, 2048} (HWIO layout per the
// NHWC Conv2d consumer; uint8, scale 0.0109999878332019, offset -118); continues next chunk.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_6_pwconv1_weight[] = {1, 1, 512, 2048};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_pwconv1_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ae_decoder_convnext_6_pwconv1_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0109999878332019f, .offset= -118}}},
                               .rank= 4,
                               .dimensions=dimensions_tts_ae_decoder_convnext_6_pwconv1_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= {
                               // --- continuation of addTensor_tts_ae_decoder_convnext_6_pwconv1_weight ---
                               .data=BINVARSTART(tts_ae_decoder_convnext_6_pwconv1_weight),
                               .dataSize=BINLEN(tts_ae_decoder_convnext_6_pwconv1_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
return err;
}

// Registers the static pwconv1 bias tensor {2048} (uint8, scale 0.0027730739675462,
// offset -203), bytes bound from the weight binary.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_6_pwconv1_bias[] = {2048};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_pwconv1_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ae_decoder_convnext_6_pwconv1_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0027730739675462f, .offset= -203}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ae_decoder_convnext_6_pwconv1_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_pwconv1_bias),
                                              .dataSize=BINLEN(tts_ae_decoder_convnext_6_pwconv1_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
  return err;
}

// Adds the pwconv1 1x1 Conv2d (stride 1x1, no padding, dilation 1x1, group 1):
// NHWC {1, 1, 1152, 512} x weight {1, 1, 512, 2048} + bias {2048}
// -> "_decoder_convnext_6_pwconv1_Conv_intermediate" {1, 1, 1152, 2048}
// (uint8, scale 0.0769425928592682, offset -169).
static ModelError_t addNode__decoder_convnext_6_pwconv1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv1_Conv_2d */
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_2d_dilation[] = {2};
  uint32_t _decoder_convnext_6_pwconv1_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_convnext_6_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_2d_stride[] = {2};
  uint32_t _decoder_convnext_6_pwconv1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__decoder_convnext_6_pwconv1_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv1_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv1_Conv_2d_dilation, .dataSize=8}},  // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv1_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv1_Conv_2d_pad_amount, .dataSize=16}},  // 2x2 uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv1_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= {
                            QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv1_Conv_2d_stride, .dataSize=8}},  // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Inputs: activation, weight, bias (in that order).
  const char* inputs__decoder_convnext_6_pwconv1_Conv_2d[] = {
    "_decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc",
    "tts_ae_decoder_convnext_6_pwconv1_weight",
    "tts_ae_decoder_convnext_6_pwconv1_bias"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv1_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_pwconv1_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0769425928592682f, .offset= -169}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv1_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__decoder_convnext_6_pwconv1_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__decoder_convnext_6_pwconv1_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__decoder_convnext_6_pwconv1_Conv_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Adds a Transpose with perm {0, 3, 1, 2}: NHWC {1, 1, 1152, 2048} back to NCHW
// {1, 2048, 1, 1152} after the conv; continues in the next chunk.
static ModelError_t addNode__decoder_convnext_6_pwconv1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv1_Conv_intermediate_nchw */
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _decoder_convnext_6_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__decoder_convnext_6_pwconv1_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv1_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}},  // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_6_pwconv1_Conv_intermediate_nchw[] = {
    "_decoder_convnext_6_pwconv1_Conv_intermediate"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name=
        // --- continuation of addNode__decoder_convnext_6_pwconv1_Conv_intermediate_nchw ---
        "_decoder_convnext_6_pwconv1_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0769425928592682f, .offset= -169}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv1_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_6_pwconv1_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_6_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_pwconv1_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
                         ), err);
return err;
}

// Adds a Reshape that drops the unit H axis: NCHW {1, 2048, 1, 1152} ->
// rank-3 "_decoder_convnext_6_pwconv1_Conv_output_0" {1, 2048, 1152}.
static ModelError_t addNode__decoder_convnext_6_pwconv1_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv1_Conv_intermediate */
  const char* inputs__decoder_convnext_6_pwconv1_Conv_intermediate[] = {
    "_decoder_convnext_6_pwconv1_Conv_intermediate_nchw"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv1_Conv_output_0[] = {1, 2048, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv1_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_pwconv1_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale=
0.0769425928592682f, .offset= -169}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_6_pwconv1_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv1_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__decoder_convnext_6_pwconv1_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_pwconv1_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Adds an ElementWiseNeuron node (operation=1 enum value; presumably the ConvNeXt GELU
// activation given the surrounding _act_Mul node names — confirm against QnnOpDef)
// mapping "_decoder_convnext_6_pwconv1_Conv_output_0" to
// "_decoder_convnext_6_act_Mul_1_output_0" {1, 2048, 1152}
// (uint8, scale 0.0266522094607353, offset -6); continues in the next chunk.
static ModelError_t addNode__elementwiseneuron_12(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _elementwiseneuron_12 */
  Qnn_Param_t params__elementwiseneuron_12[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__elementwiseneuron_12[] = {
    "_decoder_convnext_6_pwconv1_Conv_output_0"
  };
  uint32_t dimensions__decoder_convnext_6_act_Mul_1_output_0[] = {1, 2048, 1152};
  Qnn_Tensor_t outputs__elementwiseneuron_12[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_act_Mul_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0266522094607353f, .offset= -6}}},
        .rank= 3,
        .dimensions=dimensions__decoder_convnext_6_act_Mul_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
        // --- continuation of addNode__elementwiseneuron_12 ---
        {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_elementwiseneuron_12", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseNeuron", // Qnn Node Type
                         params__elementwiseneuron_12, // Node Params
                         1, // Num Node Params
                         inputs__elementwiseneuron_12, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__elementwiseneuron_12, // Output Tensors
                         1// Num Output Tensors
                         ), err);
return err;
}

// Adds a Reshape that expands the activation output {1, 2048, 1152} to the 4-D
// NCHW tensor {1, 2048, 1, 1152} so pwconv2 can run as a Conv2d.
static ModelError_t addNode__decoder_convnext_6_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv2_Conv_reshape_to_2d */
  const char* inputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d[] = {
    "_decoder_convnext_6_act_Mul_1_output_0"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_pwconv2_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0266522094607353f, .offset= -6}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv2_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Adds a Transpose with perm {0, 2, 3, 1}: NCHW {1, 2048, 1, 1152} -> NHWC
// {1, 1, 1152, 2048}, the layout the pwconv2 Conv2d consumes.
static ModelError_t addNode__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},  // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    "_decoder_convnext_6_pwconv2_Conv_reshape_to_2d"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0266522094607353f, .offset= -6}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Registers the static 1x1 pwconv2 weight tensor {1, 1, 2048, 512} (uint8,
// scale 0.0168958771973848, offset -120); continues in the next chunk.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_pwconv2_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_6_pwconv2_weight[] = {1, 1, 2048, 512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_pwconv2_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ae_decoder_convnext_6_pwconv2_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0168958771973848f, .offset= -120}}},
                               .rank= 4,
                               .dimensions=dimensions_tts_ae_decoder_convnext_6_pwconv2_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_pwconv2_weight),
                                              .dataSize=BINLEN(tts_ae_decoder_convnext_6_pwconv2_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= {
                               // --- continuation of addTensor_tts_ae_decoder_convnext_6_pwconv2_weight ---
                               QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
return err;
}

// Registers the static pwconv2 bias tensor {512} (uint8, scale 0.0025493607390672,
// offset -82), bytes bound from the weight binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ae_decoder_convnext_6_pwconv2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ae_decoder_convnext_6_pwconv2_bias[] = {512};
  VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_pwconv2_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ae_decoder_convnext_6_pwconv2_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0025493607390672f, .offset= -82}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ae_decoder_convnext_6_pwconv2_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_pwconv2_bias),
                                              .dataSize=BINLEN(tts_ae_decoder_convnext_6_pwconv2_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
  return err;
}

// Adds the pwconv2 1x1 Conv2d (stride 1x1, no padding, dilation 1x1, group 1):
// NHWC {1, 1, 1152, 2048} x weight {1, 1, 2048, 512} + bias {512}
// -> "_decoder_convnext_6_pwconv2_Conv_intermediate" {1, 1, 1152, 512}
// (uint8, scale 0.0673748031258583, offset -132). Definition continues past this chunk.
static ModelError_t addNode__decoder_convnext_6_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _decoder_convnext_6_pwconv2_Conv_2d */
  uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _decoder_convnext_6_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_convnext_6_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _decoder_convnext_6_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__decoder_convnext_6_pwconv2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2=
{
         .id=0,
         .name= "_decoder_convnext_6_pwconv2_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv2_Conv_2d_dilation, .dataSize=8}},  // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv2_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv2_Conv_2d_pad_amount, .dataSize=16}},  // 2x2 uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_decoder_convnext_6_pwconv2_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv2_Conv_2d_stride, .dataSize=8}},  // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Inputs: activation, weight, bias (in that order).
  const char* inputs__decoder_convnext_6_pwconv2_Conv_2d[] = {
    "_decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc",
    "tts_ae_decoder_convnext_6_pwconv2_weight",
    "tts_ae_decoder_convnext_6_pwconv2_bias"
  };
  uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_convnext_6_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_decoder_convnext_6_pwconv2_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0673748031258583f, .offset= -132}}},
        .rank= 4,
        .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_decoder_convnext_6_pwconv2_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__decoder_convnext_6_pwconv2_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__decoder_convnext_6_pwconv2_Conv_2d, // Input
Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_6_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_6_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_6_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_6_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_6_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_6_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_6_pwconv2_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_6_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0673748031258583f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_6_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_6_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_6_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_6_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_6_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_6_pwconv2_Conv_intermediate */ const char* inputs__decoder_convnext_6_pwconv2_Conv_intermediate[] = { "_decoder_convnext_6_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_6_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0673748031258583f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, 
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_6_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_6_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_6_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_6_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_6_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_convnext_6_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_6_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_6_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_6_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_6_pwconv2_Conv_output_0" 
}; uint32_t dimensions__decoder_convnext_6_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_6_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0673748031258583f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_6_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_6_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_6_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__decoder_convnext_6_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_6_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_6_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_6_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_6_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_6_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036161139141768f, 
.offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_6_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_6_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_6_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_6_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_6_Mul */ Qnn_Param_t params__decoder_convnext_6_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__decoder_convnext_6_Mul[] = { "tts_ae_decoder_convnext_6_gamma", "_decoder_convnext_6_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__decoder_convnext_6_Mul_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_6_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0287050120532513f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_6_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_6_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_6_Mul, // Node Params 1, // Num Node Params inputs__decoder_convnext_6_Mul, // Input Tensor Names 2, // 
Num Input Tensor Names outputs__decoder_convnext_6_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_6_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_6_Add */ Qnn_Param_t params__decoder_convnext_6_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__decoder_convnext_6_Add[] = { "_decoder_convnext_5_Add_output_0", "_decoder_convnext_6_Mul_output_0" }; uint32_t dimensions__decoder_convnext_6_Add_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_6_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_6_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0317676998674870f, .offset= -141}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_6_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_6_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_6_Add, // Node Params 1, // Num Node Params inputs__decoder_convnext_6_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_6_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_dwconv_Pad */ uint32_t 
dimensions__decoder_convnext_7_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _decoder_convnext_7_dwconv_Pad_pad_amount[] = {0, 0, 6, 0, 0, 0}; Qnn_Param_t params__decoder_convnext_7_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_7_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__decoder_convnext_7_dwconv_Pad[] = { "_decoder_convnext_6_Add_output_0" }; uint32_t dimensions__decoder_convnext_7_dwconv_Pad_output_0[] = {1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0317676998674870f, .offset= -141}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__decoder_convnext_7_dwconv_Pad, // Node Params 2, // Num Node Params inputs__decoder_convnext_7_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_dwconv_Pad_output_0_ncf */ uint32_t dimensions__decoder_convnext_7_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_7_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_7_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_7_dwconv_Pad_output_0" }; uint32_t dimensions__decoder_convnext_7_dwconv_Pad_output_0_ncf[] = {1, 512, 1158}; Qnn_Tensor_t 
outputs__decoder_convnext_7_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0317676998674870f, .offset= -141}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_dwconv_net_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_7_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1158}; Qnn_Tensor_t outputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0317676998674870f, .offset= -141}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0317676998674870f, .offset= -141}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_dwconv_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t 
dimensions_tts_ae_decoder_convnext_7_dwconv_net_weight[] = {1, 7, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_dwconv_net_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047952956520021f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_7_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_7_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_dwconv_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_dwconv_net_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_dwconv_net_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013053971342742f, .offset= -176}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_7_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_7_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_7_dwconv_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_dwconv_net_Conv_2d */ uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_7_dwconv_net_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_7_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_7_dwconv_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_7_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, 
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_dwconv_net_Conv_2d[] = { "_decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_7_dwconv_net_weight", "tts_ae_decoder_convnext_7_dwconv_net_bias" }; uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_dwconv_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0150569090619683f, .offset= -110}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_dwconv_net_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__decoder_convnext_7_dwconv_net_Conv_2d, // Node Params 3, // Num Node Params inputs__decoder_convnext_7_dwconv_net_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_7_dwconv_net_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_dwconv_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_7_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_7_dwconv_net_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0150569090619683f, .offset= -110}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_dwconv_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_dwconv_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_dwconv_net_Conv_intermediate */ const char* 
inputs__decoder_convnext_7_dwconv_net_Conv_intermediate[] = { "_decoder_convnext_7_dwconv_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_7_dwconv_net_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_dwconv_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0150569090619683f, .offset= -110}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_dwconv_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_7_dwconv_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_dwconv_net_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_norm_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_norm_Transpose */ uint32_t dimensions__decoder_convnext_7_norm_Transpose_perm[] = {3}; uint32_t _decoder_convnext_7_norm_Transpose_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_7_norm_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_norm_Transpose_perm", 
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_norm_Transpose[] = { "_decoder_convnext_7_dwconv_net_Conv_output_0" }; uint32_t dimensions__decoder_convnext_7_norm_Transpose_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_norm_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0150569090619683f, .offset= -110}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_norm_Transpose", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_norm_Transpose, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_norm_Transpose, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_norm_Transpose, // Output Tensors 1// 
Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034618375357240f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_7_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_norm_norm_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_7_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047091664746404f, .offset= -126}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_7_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_norm_norm_bias), 
.dataSize=BINLEN(tts_ae_decoder_convnext_7_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_7_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_norm_norm_LayerNormalization */ uint32_t dimensions__decoder_convnext_7_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _decoder_convnext_7_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__decoder_convnext_7_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__decoder_convnext_7_norm_norm_LayerNormalization[] = { "_decoder_convnext_7_norm_Transpose_output_0", "tts_ae_decoder_convnext_7_norm_norm_weight", "tts_ae_decoder_convnext_7_norm_norm_bias" }; uint32_t dimensions__decoder_convnext_7_norm_Transpose_1_output_0[] = {1, 1152, 512}; Qnn_Tensor_t 
outputs__decoder_convnext_7_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286649912595749f, .offset= -161}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__decoder_convnext_7_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__decoder_convnext_7_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_7_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__decoder_convnext_7_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_7_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_7_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_7_norm_Transpose_1_output_0" }; uint32_t dimensions__decoder_convnext_7_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286649912595749f, .offset= -161}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__decoder_convnext_7_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv1_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d[] = { "_decoder_convnext_7_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286649912595749f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t 
dimensions__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_7_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286649912595749f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_pwconv1_weight[] = {1, 1, 512, 2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141015937551856f, .offset= -99}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_7_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_7_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_pwconv1_bias[] = {2048}; 
VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027201694902033f, .offset= -221}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_7_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_7_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv1_Conv_2d */ uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_7_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_7_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_7_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_7_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, 
.rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, 
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_7_pwconv1_Conv_2d[] = { "_decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_7_pwconv1_weight", "tts_ae_decoder_convnext_7_pwconv1_bias" }; uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0944140627980232f, .offset= -175}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_7_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_7_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv1_Conv_intermediate_nchw */ 
uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_7_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_7_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_pwconv1_Conv_intermediate_nchw[] = { "_decoder_convnext_7_pwconv1_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0944140627980232f, .offset= -175}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv1_Conv_intermediate */ const char* inputs__decoder_convnext_7_pwconv1_Conv_intermediate[] = { "_decoder_convnext_7_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_7_pwconv1_Conv_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0944140627980232f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn 
Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_7_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_14(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_14 */ Qnn_Param_t params__elementwiseneuron_14[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_14[] = { "_decoder_convnext_7_pwconv1_Conv_output_0" }; uint32_t dimensions__decoder_convnext_7_act_Mul_1_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__elementwiseneuron_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302409324795008f, .offset= -6}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_14", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_14, // Node Params 1, // Num Node Params inputs__elementwiseneuron_14, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_14, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__decoder_convnext_7_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv2_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d[] = { "_decoder_convnext_7_act_Mul_1_output_0" }; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302409324795008f, .offset= -6}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 
3, 1}; Qnn_Param_t params__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302409324795008f, .offset= -6}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, 
// NOTE(review): auto-generated QNN converter output (see file header). Code below is byte-identical
// to the generator's emission; only standalone comment lines were added. Each addTensor_*/addNode_*
// helper registers one static tensor or one graph node on the QnnModel and returns via VALIDATE on error.
// (cont.) argument list of the model.addNode(...) call begun on the previous line: Transpose node
// "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc" feeding the 1x1 pointwise conv.
// Op_Config_t Version "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_pwconv2_weight[] = {1, 1, 2048, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0333431661128998f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_7_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_7_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_pwconv2_bias", .type=
// (cont.) Qnn_Tensor_t v2 initializer for the static {512}-element bias tensor, weight data pulled in
// via the BINVARSTART/BINLEN linker symbols; then addNode__decoder_convnext_7_pwconv2_Conv_2d begins.
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027751962188631f, .offset= -75}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_7_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_7_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv2_Conv_2d */ uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_7_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_7_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_7_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_7_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv2_Conv_2d_dilation, .dataSize=8}},
// (cont.) remaining Conv2d tensor params: "pad_amount" (rank 2, {0,0,0,0}) and "stride" ({1,1}),
// then the "group" scalar param begins — values above match a 1x1 stride-1 unpadded convolution.
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam=
// (cont.) "reuse_sparse_indices" scalar completes the 5-param list; Conv2d node maps
// NHWC {1,1,1152,2048} x weight {1,1,2048,512} -> {1,1,1152,512}. Then the NCHW-restoring
// Transpose helper addNode__decoder_convnext_7_pwconv2_Conv_intermediate_nchw begins (perm {0,3,1,2}).
(Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_7_pwconv2_Conv_2d[] = { "_decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_7_pwconv2_weight", "tts_ae_decoder_convnext_7_pwconv2_bias" }; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0862952843308449f, .offset= -163}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_7_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_7_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_7_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t
// (cont.) "perm" tensor param and output tensor ({1,512,1,1152}, same scale/offset as its input)
// for the NCHW-restoring Transpose.
params__decoder_convnext_7_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_7_pwconv2_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0862952843308449f, .offset= -163}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
// (cont.) addNode(...) for that Transpose, then a param-less Reshape node that drops the dummy
// height axis: {1,512,1,1152} -> rank-3 {1,512,1152} "_decoder_convnext_7_pwconv2_Conv_output_0".
"_decoder_convnext_7_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv2_Conv_intermediate */ const char* inputs__decoder_convnext_7_pwconv2_Conv_intermediate[] = { "_decoder_convnext_7_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0862952843308449f, .offset= -163}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_7_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names
// (cont.) Reshape addNode(...) finishes; next, Transpose "_decoder_convnext_7_pwconv2_Conv_output_0_nfc"
// (perm {0,2,1}) swaps channel/feature axes: {1,512,1152} -> {1,1152,512}.
outputs__decoder_convnext_7_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_convnext_7_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_7_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_7_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_7_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_7_pwconv2_Conv_output_0" }; uint32_t dimensions__decoder_convnext_7_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale=
// (cont.) scale/offset of the NFC transpose output; then the static per-channel gamma tensor
// "tts_ae_decoder_convnext_7_gamma" ({1,1,512}, BINVARSTART-backed) and the Mul node function begin.
0.0862952843308449f, .offset= -163}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_7_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_7_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_7_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_7_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_7_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_7_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050359251908958f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_7_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_7_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_7_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_7_Mul(QnnModel& model){ ModelError_t err =
// (cont.) ElementWiseBinary node "_decoder_convnext_7_Mul" (scalar "operation"=13) multiplying
// gamma by the pwconv2 output; output {1,1152,512}.
MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_Mul */ Qnn_Param_t params__decoder_convnext_7_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__decoder_convnext_7_Mul[] = { "tts_ae_decoder_convnext_7_gamma", "_decoder_convnext_7_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__decoder_convnext_7_Mul_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0451676510274410f, .offset= -109}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_7_Mul, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_7_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_7_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_7_Add */ Qnn_Param_t params__decoder_convnext_7_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__decoder_convnext_7_Add[] = {
// (cont.) "_decoder_convnext_7_Add" (ElementWiseBinary, "operation"=0) sums the convnext_6 block
// output with the Mul result; then the Pad node for convnext_8's depthwise conv begins.
"_decoder_convnext_6_Add_output_0", "_decoder_convnext_7_Mul_output_0" }; uint32_t dimensions__decoder_convnext_7_Add_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_7_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_7_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0592633225023746f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_7_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_7_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_7_Add, // Node Params 1, // Num Node Params inputs__decoder_convnext_7_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_7_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_8_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_dwconv_Pad */ uint32_t dimensions__decoder_convnext_8_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _decoder_convnext_8_dwconv_Pad_pad_amount[] = {0, 0, 6, 0, 0, 0}; Qnn_Param_t params__decoder_convnext_8_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
// (cont.) Pad node params: 3x2 pad_amount {0,0, 6,0, 0,0} (6 leading elements on the middle axis:
// 1152 -> 1158) plus "scheme" scalar (=3); output keeps the input's quant encoding.
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_8_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__decoder_convnext_8_dwconv_Pad[] = { "_decoder_convnext_7_Add_output_0" }; uint32_t dimensions__decoder_convnext_8_dwconv_Pad_output_0[] = {1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_8_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0592633225023746f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__decoder_convnext_8_dwconv_Pad, // Node Params 2, // Num Node Params inputs__decoder_convnext_8_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static
// (cont.) Transpose "_decoder_convnext_8_dwconv_Pad_output_0_ncf" (perm {0,2,1}):
// {1,1158,512} -> {1,512,1158} ahead of the depthwise conv's 2d reshape.
ModelError_t addNode__decoder_convnext_8_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_dwconv_Pad_output_0_ncf */ uint32_t dimensions__decoder_convnext_8_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_8_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_8_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_8_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_8_dwconv_Pad_output_0" }; uint32_t dimensions__decoder_convnext_8_dwconv_Pad_output_0_ncf[] = {1, 512, 1158}; Qnn_Tensor_t outputs__decoder_convnext_8_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0592633225023746f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW,
// (cont.) output tensor initializer finishes, the NCF Transpose is registered, then a param-less
// Reshape lifts {1,512,1158} to rank-4 {1,512,1,1158} for the DepthWiseConv2d.
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_8_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_8_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_dwconv_net_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_8_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1158}; Qnn_Tensor_t outputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0592633225023746f, .offset= -115}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
// (cont.) addNode(...) for the Reshape; then Transpose "_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc"
// (perm {0,2,3,1}) converts to NHWC {1,1,1158,512} for the depthwise conv.
"_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1158, 512}; Qnn_Tensor_t
// (cont.) output tensor and addNode(...) for the NHWC Transpose; then the static depthwise
// weight tensor "tts_ae_decoder_convnext_8_dwconv_net_weight" ({1,7,1,512}, i.e. a 7-tap filter per channel) begins.
outputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0592633225023746f, .offset= -115}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_8_dwconv_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_8_dwconv_net_weight[] = {1, 7, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_dwconv_net_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale=
// (cont.) weight encoding values; then the {512} depthwise bias tensor, followed by the start of
// addNode__decoder_convnext_8_dwconv_net_Conv_2d (continues beyond this region).
0.0045913890935481f, .offset= -139}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_8_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_8_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_8_dwconv_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_8_dwconv_net_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_dwconv_net_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012702975654975f, .offset= -156}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_8_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_8_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_8_dwconv_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_dwconv_net_Conv_2d */ uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_8_dwconv_net_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t
/* (continuation of addNode__decoder_convnext_8_dwconv_net_Conv_2d)
   pad_amount = {{0,0},{0,0}} and stride = {1,1}; with the dilation declared
   just above these back the three STATIC uint32 param tensors of the node. */
_decoder_convnext_8_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_2d_stride[] = {2};
uint32_t _decoder_convnext_8_dwconv_net_Conv_2d_stride[] = {1, 1};
/* Node params: unquantized QNN_DATATYPE_UINT_32 static tensors
   (dilation / pad_amount / stride) whose clientBuf points at the arrays above. */
Qnn_Param_t params__decoder_convnext_8_dwconv_net_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
/* Inputs: NHWC-reshaped activation plus static weight/bias; the output is the
   quantized u8 NHWC conv result, 1x1x1152x512 (scale/offset below). */
const char* inputs__decoder_convnext_8_dwconv_net_Conv_2d[] = {
"_decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc",
"tts_ae_decoder_convnext_8_dwconv_net_weight",
"tts_ae_decoder_convnext_8_dwconv_net_bias"
};
uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_8_dwconv_net_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184707604348660f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_dwconv_net_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__decoder_convnext_8_dwconv_net_Conv_2d, // Node Params
3, // Num Node Params
inputs__decoder_convnext_8_dwconv_net_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_8_dwconv_net_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose the depthwise-conv output from NHWC back to NCHW (perm {0,3,1,2});
   the scale/offset encoding is carried through unchanged. */
static ModelError_t addNode__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_dwconv_net_Conv_intermediate_nchw */
uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw_perm[] = {4};
uint32_t _decoder_convnext_8_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw[] = {
"_decoder_convnext_8_dwconv_net_Conv_intermediate"
};
uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152};
Qnn_Tensor_t outputs__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
.name= "_decoder_convnext_8_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184707604348660f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_dwconv_net_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Drop the unit H axis: Reshape {1,512,1,1152} -> {1,512,1152}, producing the
   ONNX-named Conv output tensor; quantization encoding is unchanged. */
static ModelError_t addNode__decoder_convnext_8_dwconv_net_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_dwconv_net_Conv_intermediate */
const char* inputs__decoder_convnext_8_dwconv_net_Conv_intermediate[] = {
"_decoder_convnext_8_dwconv_net_Conv_intermediate_nchw"
};
uint32_t dimensions__decoder_convnext_8_dwconv_net_Conv_output_0[] = {1, 512, 1152};
Qnn_Tensor_t outputs__decoder_convnext_8_dwconv_net_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184707604348660f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_dwconv_net_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_8_dwconv_net_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_dwconv_net_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose {1,512,1152} -> {1,1152,512} (perm {0,2,1}) so the 512-channel
   axis is innermost for the LayerNorm that follows. */
static ModelError_t addNode__decoder_convnext_8_norm_Transpose(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_norm_Transpose */
uint32_t dimensions__decoder_convnext_8_norm_Transpose_perm[] = {3};
uint32_t _decoder_convnext_8_norm_Transpose_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_8_norm_Transpose[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_norm_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_8_norm_Transpose[] = {
"_decoder_convnext_8_dwconv_net_Conv_output_0"
};
uint32_t dimensions__decoder_convnext_8_norm_Transpose_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_8_norm_Transpose[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184707604348660f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_norm_Transpose", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_8_norm_Transpose, // Node Params
1, // Num Node Params
inputs__decoder_convnext_8_norm_Transpose, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_norm_Transpose, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* LayerNorm gamma (512 elements): quantized u8 static weight whose bytes live
   in the linked binary blob (BINVARSTART/BINLEN resolve the symbol extents). */
static ModelError_t addTensor_tts_ae_decoder_convnext_8_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_8_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029213661327958f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_8_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_norm_norm_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_8_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* LayerNorm beta (512 elements), same static-blob layout as the weight. */
static ModelError_t addTensor_tts_ae_decoder_convnext_8_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_8_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017558336257935f, .offset= -153}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_8_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_norm_norm_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_8_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* LayerNorm over the innermost (512) axis of {1,1152,512}; axes param below is
   a single-element static tensor holding {2}. */
static ModelError_t addNode__decoder_convnext_8_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_norm_norm_LayerNormalization */
uint32_t dimensions__decoder_convnext_8_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _decoder_convnext_8_norm_norm_LayerNormalization_axes[] = {2};
/* Params: "axes" (static uint32 tensor, one element = axis 2) and scalar
   "epsilon" = 1e-6 (float32). */
Qnn_Param_t params__decoder_convnext_8_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
/* Inputs: normalized activation plus the gamma/beta tensors registered above. */
const char* inputs__decoder_convnext_8_norm_norm_LayerNormalization[] = {
"_decoder_convnext_8_norm_Transpose_output_0",
"tts_ae_decoder_convnext_8_norm_norm_weight",
"tts_ae_decoder_convnext_8_norm_norm_bias"
};
uint32_t dimensions__decoder_convnext_8_norm_Transpose_1_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_8_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279711559414864f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__decoder_convnext_8_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__decoder_convnext_8_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_8_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose LayerNorm output back to channel-first {1,512,1152}
   (perm {0,2,1}); "ncf" = N, channel, feature ordering. */
static ModelError_t addNode__decoder_convnext_8_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__decoder_convnext_8_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _decoder_convnext_8_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_8_norm_Transpose_1_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_8_norm_Transpose_1_output_0_ncf[] = {
"_decoder_convnext_8_norm_Transpose_1_output_0"
};
uint32_t dimensions__decoder_convnext_8_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152};
Qnn_Tensor_t outputs__decoder_convnext_8_norm_Transpose_1_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279711559414864f, .offset= -130}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_8_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__decoder_convnext_8_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Re-rank {1,512,1152} -> {1,512,1,1152} so the pointwise conv (pwconv1) can
   run as a 2d conv with a unit H axis. */
static ModelError_t addNode__decoder_convnext_8_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_pwconv1_Conv_reshape_to_2d */
const char* inputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d[] = {
"_decoder_convnext_8_norm_Transpose_1_output_0_ncf"
};
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152};
Qnn_Tensor_t
outputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279711559414864f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose NCHW {1,512,1,1152} -> NHWC {1,1,1152,512} (perm {0,2,3,1}) —
   the layout the conv op consumes. */
static ModelError_t addNode__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc[] = {
"_decoder_convnext_8_pwconv1_Conv_reshape_to_2d"
};
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279711559414864f, .offset= -130}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* pwconv1 weight: 1x1 conv kernel {1,1,512,2048} (512 -> 2048 expansion),
   quantized u8, stored in the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_8_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_8_pwconv1_weight[] = {1, 1, 512, 2048};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0121532212942839f, .offset= -122}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_8_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_8_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* pwconv1 bias: 2048 elements, quantized u8 (8-bit bias per converter flags),
   stored in the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_8_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_8_pwconv1_bias[] = {2048};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023180642165244f, .offset= -199}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_8_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_8_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* pwconv1 as Conv2d: 1x1 kernel over NHWC {1,1,1152,512} -> {1,1,1152,2048};
   dilation/stride {1,1}, zero padding, group=1 (dense, not depthwise). */
static ModelError_t addNode__decoder_convnext_8_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_pwconv1_Conv_2d */
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _decoder_convnext_8_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _decoder_convnext_8_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_2d_stride[] = {2};
uint32_t _decoder_convnext_8_pwconv1_Conv_2d_stride[] = {1, 1};
/* Five params: three static uint32 tensors + two scalars (group,
   reuse_sparse_indices=false). */
Qnn_Param_t params__decoder_convnext_8_pwconv1_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__decoder_convnext_8_pwconv1_Conv_2d[] = {
"_decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc",
"tts_ae_decoder_convnext_8_pwconv1_weight",
"tts_ae_decoder_convnext_8_pwconv1_bias"
};
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048};
Qnn_Tensor_t
outputs__decoder_convnext_8_pwconv1_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0958585962653160f, .offset= -152}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__decoder_convnext_8_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__decoder_convnext_8_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__decoder_convnext_8_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose pwconv1 output NHWC {1,1,1152,2048} -> NCHW {1,2048,1,1152}
   (perm {0,3,1,2}); encoding carried through unchanged. */
static ModelError_t addNode__decoder_convnext_8_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _decoder_convnext_8_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__decoder_convnext_8_pwconv1_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_convnext_8_pwconv1_Conv_intermediate_nchw[] = {
"_decoder_convnext_8_pwconv1_Conv_intermediate"
};
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152};
Qnn_Tensor_t outputs__decoder_convnext_8_pwconv1_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0958585962653160f, .offset= -152}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__decoder_convnext_8_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__decoder_convnext_8_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Drop the unit H axis: Reshape {1,2048,1,1152} -> {1,2048,1152}, yielding the
   ONNX-named pwconv1 Conv output; encoding unchanged. */
static ModelError_t addNode__decoder_convnext_8_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_8_pwconv1_Conv_intermediate */
const char* inputs__decoder_convnext_8_pwconv1_Conv_intermediate[] = {
"_decoder_convnext_8_pwconv1_Conv_intermediate_nchw"
};
uint32_t dimensions__decoder_convnext_8_pwconv1_Conv_output_0[] = {1, 2048, 1152};
Qnn_Tensor_t outputs__decoder_convnext_8_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0958585962653160f, .offset= -152}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_decoder_convnext_8_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__decoder_convnext_8_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__decoder_convnext_8_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Fused activation: ElementWiseNeuron with scalar "operation" = 1, applied to
   the pwconv1 output and producing the ONNX *_act_Mul_1 tensor. NOTE(review):
   the meaning of operation value 1 comes from the QNN ElementWiseNeuron enum —
   presumably the converter's lowering of the ConvNeXt activation; confirm
   against QnnOpDef.h. */
static ModelError_t addNode__elementwiseneuron_16(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_16 */
Qnn_Param_t params__elementwiseneuron_16[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_16[] = { "_decoder_convnext_8_pwconv1_Conv_output_0" }; uint32_t dimensions__decoder_convnext_8_act_Mul_1_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__elementwiseneuron_16[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0394865386188030f, .offset= -4}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_16", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_16, // Node Params 1, // Num Node Params inputs__elementwiseneuron_16, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_16, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Reshape node: expands the rank-3 activation output {1, 2048, 1152} to the rank-4 form
 * {1, 2048, 1, 1152} (channel-first with dummy H=1) so the 1x1 pwconv2 can run as a Conv2d. */
static ModelError_t addNode__decoder_convnext_8_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_pwconv2_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d[] = { "_decoder_convnext_8_act_Mul_1_output_0" }; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_convnext_8_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0394865386188030f, .offset= -4}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Transpose node: permutes {1, 2048, 1, 1152} with perm {0, 2, 3, 1} (NCHW -> NHWC) into
 * {1, 1, 1152, 2048}, the spatial-last layout Conv2d consumes. The perm is a static UINT_32
 * tensor param embedded in this function (dataSize 16 = 4 x uint32). */
static ModelError_t addNode__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 
0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_8_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0394865386188030f, .offset= -4}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Static weight tensor for pwconv2 of ConvNeXt block 8: uint8 {1, 1, 2048, 512} (1x1 kernel,
 * 2048 in / 512 out channels), scale/offset quantized; payload comes from the binary blob
 * via BINVARSTART/BINLEN, so this function only registers metadata. */
static ModelError_t 
addTensor_tts_ae_decoder_convnext_8_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_8_pwconv2_weight[] = {1, 1, 2048, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0211502686142921f, .offset= -134}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_8_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_8_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Static bias tensor for pwconv2: uint8 {512}, quantized, payload from the binary blob. */
static ModelError_t addTensor_tts_ae_decoder_convnext_8_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_8_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032881039660424f, .offset= -100}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_8_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_8_pwconv2_bias)}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Conv2d node implementing the 1x1 pointwise pwconv2: dilation {1,1}, stride {1,1},
 * pad_amount all-zero, group=1 (dense, not depthwise), reuse_sparse_indices=false.
 * Inputs: NHWC activation {1, 1, 1152, 2048} + the static weight/bias tensors above.
 * Output: {1, 1, 1152, 512}, requantized to scale 0.1056..., offset -141. */
static ModelError_t addNode__decoder_convnext_8_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_pwconv2_Conv_2d */ uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_8_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_8_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_8_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_8_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_8_pwconv2_Conv_2d[] = { "_decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_8_pwconv2_weight", "tts_ae_decoder_convnext_8_pwconv2_bias" }; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_8_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_convnext_8_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1056430414319038f, .offset= -141}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_convnext_8_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_convnext_8_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_8_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Transpose node: perm {0, 3, 1, 2} (NHWC -> NCHW), {1, 1, 1152, 512} -> {1, 512, 1, 1152},
 * undoing the layout change after the Conv2d. */
static ModelError_t addNode__decoder_convnext_8_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_8_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_8_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, 
.rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_8_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_8_pwconv2_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_8_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1056430414319038f, .offset= -141}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_8_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_8_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Reshape node: drops the dummy H dimension, {1, 512, 1, 1152} -> {1, 512, 1152}
 * ("_decoder_convnext_8_pwconv2_Conv_output_0"). */
static ModelError_t 
addNode__decoder_convnext_8_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_pwconv2_Conv_intermediate */ const char* inputs__decoder_convnext_8_pwconv2_Conv_intermediate[] = { "_decoder_convnext_8_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_8_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1056430414319038f, .offset= -141}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_8_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Transpose node: perm {0, 2, 1}, {1, 512, 1152} -> {1, 1152, 512} — moves channels last
 * ("_nfc" = channel-last feature layout) ahead of the per-channel gamma multiply below. */
static ModelError_t addNode__decoder_convnext_8_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_convnext_8_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t 
params__decoder_convnext_8_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_8_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_8_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_8_pwconv2_Conv_output_0" }; uint32_t dimensions__decoder_convnext_8_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_8_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1056430414319038f, .offset= -141}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_pwconv2_Conv_output_0_nfc", // Node 
Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_8_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__decoder_convnext_8_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_8_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Static gamma tensor for ConvNeXt block 8's layer-scale: uint8 {1, 1, 512}, offset 0,
 * payload from the binary blob; broadcast-multiplied against {1, 1152, 512} below. */
static ModelError_t addTensor_tts_ae_decoder_convnext_8_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_8_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_8_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_8_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0068632233887911f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_8_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_8_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_8_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* ElementWiseBinary node (scalar param "operation" = 13): gamma x pwconv2-output.
 * NOTE(review): operation 13 presumably selects multiply, per the node's ONNX origin
 * "_decoder_convnext_8_Mul" — confirm the id mapping against QnnOpDef.h. */
static ModelError_t addNode__decoder_convnext_8_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_Mul */ Qnn_Param_t params__decoder_convnext_8_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__decoder_convnext_8_Mul[] = { "tts_ae_decoder_convnext_8_gamma", "_decoder_convnext_8_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__decoder_convnext_8_Mul_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_8_Mul[] = 
{ (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0664195641875267f, .offset= -106}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_8_Mul, // Node Params 1, // Num Node Params inputs__decoder_convnext_8_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_8_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* ElementWiseBinary node (scalar param "operation" = 0): residual connection, combining
 * block 7's output "_decoder_convnext_7_Add_output_0" with the scaled block-8 branch.
 * NOTE(review): operation 0 presumably selects add, per the ONNX origin
 * "_decoder_convnext_8_Add" — confirm against QnnOpDef.h. */
static ModelError_t addNode__decoder_convnext_8_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_8_Add */ Qnn_Param_t params__decoder_convnext_8_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__decoder_convnext_8_Add[] = { "_decoder_convnext_7_Add_output_0", "_decoder_convnext_8_Mul_output_0" }; uint32_t dimensions__decoder_convnext_8_Add_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_8_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_8_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0745441913604736f, .offset= -116}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_8_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_8_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__decoder_convnext_8_Add, // Node Params 1, // Num Node Params inputs__decoder_convnext_8_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_convnext_8_Add, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Pad node for ConvNeXt block 9's depthwise conv: pad_amount is a {3, 2} table
 * {0,0, 6,0, 0,0} — 6 elements prepended on axis 1 only, growing {1, 1152, 512} to
 * {1, 1158, 512} (left-only padding, consistent with a causal 7-tap filter).
 * NOTE(review): scalar param "scheme" = 3 — confirm which pad scheme id 3 selects
 * (constant/edge/reflect) in QnnOpDef.h. */
static ModelError_t addNode__decoder_convnext_9_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_dwconv_Pad */ uint32_t dimensions__decoder_convnext_9_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _decoder_convnext_9_dwconv_Pad_pad_amount[] = {0, 0, 6, 0, 0, 0}; Qnn_Param_t params__decoder_convnext_9_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_9_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, 
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__decoder_convnext_9_dwconv_Pad[] = { "_decoder_convnext_8_Add_output_0" }; uint32_t dimensions__decoder_convnext_9_dwconv_Pad_output_0[] = {1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_9_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745441913604736f, .offset= -116}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__decoder_convnext_9_dwconv_Pad, // Node Params 2, // Num Node Params inputs__decoder_convnext_9_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Transpose node: perm {0, 2, 1}, {1, 1158, 512} -> {1, 512, 1158} — back to channel-first
 * ("_ncf") so the padded sequence can be reshaped for the depthwise Conv2d. */
static ModelError_t addNode__decoder_convnext_9_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_dwconv_Pad_output_0_ncf */ uint32_t dimensions__decoder_convnext_9_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_9_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_9_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_9_dwconv_Pad_output_0_ncf[] = { "_decoder_convnext_9_dwconv_Pad_output_0" }; uint32_t dimensions__decoder_convnext_9_dwconv_Pad_output_0_ncf[] = {1, 512, 1158}; Qnn_Tensor_t outputs__decoder_convnext_9_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745441913604736f, .offset= -116}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_9_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params 
inputs__decoder_convnext_9_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Reshape node: expands {1, 512, 1158} to {1, 512, 1, 1158} (dummy H=1) so the 1-D
 * depthwise conv of ConvNeXt block 9 can run as a Conv2d. */
static ModelError_t addNode__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_dwconv_net_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d[] = { "_decoder_convnext_9_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d[] = {1, 512, 1, 1158}; Qnn_Tensor_t outputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745441913604736f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; }
/* Transpose node: perm {0, 2, 3, 1} (NCHW -> NHWC), {1, 512, 1, 1158} -> {1, 1, 1158, 512},
 * feeding the depthwise Conv2d in spatial-last layout. */
static ModelError_t addNode__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc(QnnModel& 
model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1158, 512}; Qnn_Tensor_t outputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745441913604736f, .offset= -116}}}, .rank= 4, 
.dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_9_dwconv_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_9_dwconv_net_weight[] = {1, 7, 1, 512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_dwconv_net_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_dwconv_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0031902899499983f, .offset= -129}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_9_dwconv_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_dwconv_net_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_9_dwconv_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static 
ModelError_t addTensor_tts_ae_decoder_convnext_9_dwconv_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_9_dwconv_net_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_dwconv_net_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_dwconv_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028330837376416f, .offset= -214}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_9_dwconv_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_dwconv_net_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_9_dwconv_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_9_dwconv_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_dwconv_net_Conv_2d */ uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_9_dwconv_net_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_9_dwconv_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_9_dwconv_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_9_dwconv_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_2d_dilation", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_dwconv_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_dwconv_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_2d_stride, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_dwconv_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_9_dwconv_net_Conv_2d[] = { "_decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_9_dwconv_net_weight", "tts_ae_decoder_convnext_9_dwconv_net_bias" }; uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_intermediate[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_9_dwconv_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0208770222961903f, .offset= -212}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_dwconv_net_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__decoder_convnext_9_dwconv_net_Conv_2d, // Node Params 3, // Num Node Params inputs__decoder_convnext_9_dwconv_net_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_9_dwconv_net_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING 
NODE FOR _decoder_convnext_9_dwconv_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_convnext_9_dwconv_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_dwconv_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw[] = { "_decoder_convnext_9_dwconv_net_Conv_intermediate" }; uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0208770222961903f, .offset= -212}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_dwconv_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_9_dwconv_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_dwconv_net_Conv_intermediate */ const char* inputs__decoder_convnext_9_dwconv_net_Conv_intermediate[] = { "_decoder_convnext_9_dwconv_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_convnext_9_dwconv_net_Conv_output_0[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_9_dwconv_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_dwconv_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0208770222961903f, .offset= -212}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_dwconv_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_decoder_convnext_9_dwconv_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_9_dwconv_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_dwconv_net_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_9_norm_Transpose(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_norm_Transpose */ uint32_t dimensions__decoder_convnext_9_norm_Transpose_perm[] = {3}; uint32_t _decoder_convnext_9_norm_Transpose_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_9_norm_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_norm_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_norm_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_norm_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_9_norm_Transpose[] = { "_decoder_convnext_9_dwconv_net_Conv_output_0" }; uint32_t dimensions__decoder_convnext_9_norm_Transpose_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_9_norm_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0208770222961903f, .offset= -212}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_norm_Transpose", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_9_norm_Transpose, // Node Params 1, // Num Node Params inputs__decoder_convnext_9_norm_Transpose, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_norm_Transpose, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_9_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_9_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025272252969444f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_9_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_norm_norm_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_9_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_9_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_9_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011819071369246f, .offset= -152}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_9_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_norm_norm_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_9_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_9_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_norm_norm_LayerNormalization */ uint32_t dimensions__decoder_convnext_9_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _decoder_convnext_9_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__decoder_convnext_9_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__decoder_convnext_9_norm_norm_LayerNormalization[] = { "_decoder_convnext_9_norm_Transpose_output_0", "tts_ae_decoder_convnext_9_norm_norm_weight", "tts_ae_decoder_convnext_9_norm_norm_bias" }; uint32_t dimensions__decoder_convnext_9_norm_Transpose_1_output_0[] = {1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_9_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273284371942282f, .offset= -148}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__decoder_convnext_9_norm_norm_LayerNormalization, // 
Node Params 2, // Num Node Params inputs__decoder_convnext_9_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_convnext_9_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_9_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__decoder_convnext_9_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _decoder_convnext_9_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_convnext_9_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_9_norm_Transpose_1_output_0_ncf[] = { "_decoder_convnext_9_norm_Transpose_1_output_0" }; uint32_t dimensions__decoder_convnext_9_norm_Transpose_1_output_0_ncf[] = {1, 512, 1152}; Qnn_Tensor_t outputs__decoder_convnext_9_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273284371942282f, .offset= -148}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_9_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_convnext_9_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_9_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_pwconv1_Conv_reshape_to_2d */ const char* inputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d[] = { "_decoder_convnext_9_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 1152}; Qnn_Tensor_t outputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273284371942282f, .offset= -148}}}, .rank= 4, 
.dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_9_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 512}; Qnn_Tensor_t outputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273284371942282f, .offset= -148}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_9_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_9_pwconv1_weight[] = {1, 1, 512, 2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"tts_ae_decoder_convnext_9_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126696489751339f, .offset= -126}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_9_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_pwconv1_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_9_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_convnext_9_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_convnext_9_pwconv1_bias[] = {2048}; VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0107472361996770f, .offset= -42}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_9_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_pwconv1_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_9_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_convnext_9_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_convnext_9_pwconv1_Conv_2d */ 
uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _decoder_convnext_9_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_convnext_9_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_2d_stride[] = {2}; uint32_t _decoder_convnext_9_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_convnext_9_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_convnext_9_pwconv1_Conv_2d[] = { "_decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_9_pwconv1_weight", "tts_ae_decoder_convnext_9_pwconv1_bias" }; uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_intermediate[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_convnext_9_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0502345189452171f, .offset= -128}}}, .rank= 4, 
// --- tail of addNode__decoder_convnext_9_pwconv1_Conv_2d (function head is above this chunk):
// finishes the {1,1,1152,2048} UFIXED_POINT_8 output tensor and registers the 1x1 Conv2d node
// (stride {1,1}, pad {0,0,0,0}, dilation {1,1}, group 1). NOTE: file is qnn-onnx-converter
// output — prefer regenerating the model over hand-editing.
.dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv1_Conv_2d", // Node Name
  "qti.aisw", // Package Name
  "Conv2d", // Qnn Node Type
  params__decoder_convnext_9_pwconv1_Conv_2d, // Node Params
  5, // Num Node Params
  inputs__decoder_convnext_9_pwconv1_Conv_2d, // Input Tensor Names
  3, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv1_Conv_2d, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Transpose node: {1,1,1152,2048} (NHWC) -> {1,2048,1,1152} (NCHW) via perm {0,3,1,2};
// output carries the same scale/offset quantization as the conv output it permutes.
static ModelError_t addNode__decoder_convnext_9_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _decoder_convnext_9_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__decoder_convnext_9_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_9_pwconv1_Conv_intermediate_nchw[] = { "_decoder_convnext_9_pwconv1_Conv_intermediate" };
uint32_t dimensions__decoder_convnext_9_pwconv1_Conv_intermediate_nchw[] = {1, 2048, 1, 1152};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0502345189452171f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv1_Conv_intermediate_nchw", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_9_pwconv1_Conv_intermediate_nchw, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_9_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv1_Conv_intermediate_nchw, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Reshape node: drops the dummy spatial dim, {1,2048,1,1152} -> rank-3 {1,2048,1152}
// (no params; continues on the next chunk).
static ModelError_t addNode__decoder_convnext_9_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv1_Conv_intermediate */
const char* inputs__decoder_convnext_9_pwconv1_Conv_intermediate[] = { "_decoder_convnext_9_pwconv1_Conv_intermediate_nchw" };
uint32_t
// Continuation: output tensor {1,2048,1152} for the Reshape node declared just above.
dimensions__decoder_convnext_9_pwconv1_Conv_output_0[] = {1, 2048, 1152};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0502345189452171f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv1_Conv_intermediate", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__decoder_convnext_9_pwconv1_Conv_intermediate, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv1_Conv_intermediate, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// ElementWiseNeuron with scalar operation=1 applied to the pwconv1 output {1,2048,1152}.
// NOTE(review): op-code 1 semantics come from the QNN ElementWiseNeuron op definition —
// presumably the ConvNeXt activation (output tensor is named "_decoder_convnext_9_act_Mul_1_output_0");
// confirm against the QNN SDK op docs.
static ModelError_t addNode__elementwiseneuron_18(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_18 */
Qnn_Param_t params__elementwiseneuron_18[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_18[] = { "_decoder_convnext_9_pwconv1_Conv_output_0" };
uint32_t dimensions__decoder_convnext_9_act_Mul_1_output_0[] = {1, 2048, 1152};
Qnn_Tensor_t outputs__elementwiseneuron_18[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255935359746218f, .offset= -7}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_elementwiseneuron_18", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseNeuron", // Qnn Node Type
  params__elementwiseneuron_18, // Node Params
  1, // Num Node Params
  inputs__elementwiseneuron_18, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__elementwiseneuron_18, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Reshape node: {1,2048,1152} -> {1,2048,1,1152}, re-adding a unit spatial dim so the
// pointwise pwconv2 can execute as a 2-D convolution (finishes in the next chunk).
static ModelError_t addNode__decoder_convnext_9_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv2_Conv_reshape_to_2d */
const char* inputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d[] = { "_decoder_convnext_9_act_Mul_1_output_0" };
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255935359746218f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
// Continuation of the reshape_to_2d output tensor, then node registration.
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv2_Conv_reshape_to_2d", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Transpose node: {1,2048,1,1152} (NCHW) -> {1,1,1152,2048} (NHWC) via perm {0,2,3,1},
// putting the activation in the layout Conv2d consumes.
static ModelError_t addNode__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_decoder_convnext_9_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255935359746218f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Static quantized pwconv2 weight {1,1,2048,512} (UFIXED_POINT_8); bytes come from the
// generated model .bin via BINVARSTART/BINLEN (definition finishes in the next chunk).
static ModelError_t addTensor_tts_ae_decoder_convnext_9_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_9_pwconv2_weight[] = {1, 1, 2048, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_pwconv2_weight", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
// Continuation of the pwconv2 weight tensor (scale/offset quantization, raw .bin data).
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0136472079902887f, .offset= -134}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_convnext_9_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_pwconv2_weight), .dataSize=BINLEN(tts_ae_decoder_convnext_9_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Static quantized pwconv2 bias {512} (UFIXED_POINT_8) loaded from the model .bin.
static ModelError_t addTensor_tts_ae_decoder_convnext_9_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_9_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_pwconv2_bias", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021759865339845f, .offset= -183}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_convnext_9_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_pwconv2_bias), .dataSize=BINLEN(tts_ae_decoder_convnext_9_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Pointwise Conv2d: {1,1,1152,2048} x weight {1,1,2048,512} + bias {512} -> {1,1,1152,512}.
// Params: dilation {1,1}, pad {0,0,0,0}, stride {1,1}, group=1, reuse_sparse_indices=false.
static ModelError_t addNode__decoder_convnext_9_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv2_Conv_2d */
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _decoder_convnext_9_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _decoder_convnext_9_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_2d_stride[] = {2};
uint32_t _decoder_convnext_9_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__decoder_convnext_9_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__decoder_convnext_9_pwconv2_Conv_2d[] = { "_decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_convnext_9_pwconv2_weight", "tts_ae_decoder_convnext_9_pwconv2_bias" };
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_intermediate[] = {1, 1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1267127692699432f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr,
// Continuation: close the Conv2d output tensor and register the pwconv2 node.
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv2_Conv_2d", // Node Name
  "qti.aisw", // Package Name
  "Conv2d", // Qnn Node Type
  params__decoder_convnext_9_pwconv2_Conv_2d, // Node Params
  5, // Num Node Params
  inputs__decoder_convnext_9_pwconv2_Conv_2d, // Input Tensor Names
  3, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv2_Conv_2d, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Transpose node: {1,1,1152,512} (NHWC) -> {1,512,1,1152} (NCHW) via perm {0,3,1,2}.
static ModelError_t addNode__decoder_convnext_9_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _decoder_convnext_9_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__decoder_convnext_9_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_9_pwconv2_Conv_intermediate_nchw[] = { "_decoder_convnext_9_pwconv2_Conv_intermediate" };
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 1152};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1267127692699432f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv2_Conv_intermediate_nchw", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_9_pwconv2_Conv_intermediate_nchw, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_9_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv2_Conv_intermediate_nchw, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Reshape node: {1,512,1,1152} -> rank-3 {1,512,1152}, dropping the unit spatial dim.
static ModelError_t addNode__decoder_convnext_9_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv2_Conv_intermediate */
const char* inputs__decoder_convnext_9_pwconv2_Conv_intermediate[] = { "_decoder_convnext_9_pwconv2_Conv_intermediate_nchw" };
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_output_0[] = {1, 512, 1152};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1267127692699432f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv2_Conv_intermediate", // Node Name
  "qti.aisw", // Package Name
  "Reshape", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__decoder_convnext_9_pwconv2_Conv_intermediate, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv2_Conv_intermediate, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Transpose node: {1,512,1152} -> {1,1152,512} via perm {0,2,1}, moving features last
// ahead of the gamma (layer-scale) multiply; finishes in the next chunk.
static ModelError_t addNode__decoder_convnext_9_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _decoder_convnext_9_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_convnext_9_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
// Continuation of the nfc Transpose node declared just above.
.dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_convnext_9_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__decoder_convnext_9_pwconv2_Conv_output_0_nfc[] = { "_decoder_convnext_9_pwconv2_Conv_output_0" };
uint32_t dimensions__decoder_convnext_9_pwconv2_Conv_output_0_nfc[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_9_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1267127692699432f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_pwconv2_Conv_output_0_nfc", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_convnext_9_pwconv2_Conv_output_0_nfc, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_9_pwconv2_Conv_output_0_nfc, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_convnext_9_pwconv2_Conv_output_0_nfc, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Static ConvNeXt layer-scale gamma {1,1,512} (UFIXED_POINT_8, offset 0) from the model .bin.
static ModelError_t addTensor_tts_ae_decoder_convnext_9_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ae_decoder_convnext_9_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ae_decoder_convnext_9_gamma", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_convnext_9_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0774471536278725f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ae_decoder_convnext_9_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_convnext_9_gamma), .dataSize=BINLEN(tts_ae_decoder_convnext_9_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// ElementWiseBinary with operation=13: gamma {1,1,512} (broadcast) combined with the
// pwconv2 output {1,1152,512}. NOTE(review): op-code 13 is presumably multiply (the ONNX
// node was a Mul) — confirm against the QNN ElementWiseBinary operation table.
static ModelError_t addNode__decoder_convnext_9_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_Mul */
Qnn_Param_t params__decoder_convnext_9_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__decoder_convnext_9_Mul[] = { "tts_ae_decoder_convnext_9_gamma", "_decoder_convnext_9_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__decoder_convnext_9_Mul_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_9_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.7030394077301025f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_Mul", // Node Name
  "qti.aisw", // Package Name
  "ElementWiseBinary", // Qnn Node Type
  params__decoder_convnext_9_Mul, // Node Params
  1, // Num Node Params
  inputs__decoder_convnext_9_Mul, // Input Tensor Names
  2, // Num Input Tensor Names
  outputs__decoder_convnext_9_Mul, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// ElementWiseBinary with operation=0 (add): residual connection — block input
// _decoder_convnext_8_Add_output_0 plus the scaled branch; node registered in the next chunk.
static ModelError_t addNode__decoder_convnext_9_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_convnext_9_Add */
Qnn_Param_t params__decoder_convnext_9_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__decoder_convnext_9_Add[] = { "_decoder_convnext_8_Add_output_0", "_decoder_convnext_9_Mul_output_0" };
uint32_t dimensions__decoder_convnext_9_Add_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_convnext_9_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_convnext_9_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.7107576727867126f, .offset= -117}}}, .rank= 3, .dimensions=dimensions__decoder_convnext_9_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_convnext_9_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__decoder_convnext_9_Add, // Node Params
1, // Num Node Params
inputs__decoder_convnext_9_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__decoder_convnext_9_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Static batchnorm scale tensor {512} (folded weight) for the decoder final norm, from the .bin.
static ModelError_t addTensor__decoder_final_norm_BatchNormalization_bn_w(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__decoder_final_norm_BatchNormalization_bn_w[] = {512};
VALIDATE(model.addTensor("_decoder_final_norm_BatchNormalization_bn_w", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_final_norm_BatchNormalization_bn_w", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006832131766714f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_final_norm_BatchNormalization_bn_w, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_decoder_final_norm_BatchNormalization_bn_w), .dataSize=BINLEN(_decoder_final_norm_BatchNormalization_bn_w)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Static batchnorm bias tensor {512} for the decoder final norm, from the .bin.
static ModelError_t addTensor__decoder_final_norm_BatchNormalization_bn_b(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__decoder_final_norm_BatchNormalization_bn_b[] = {512};
VALIDATE(model.addTensor("_decoder_final_norm_BatchNormalization_bn_b", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_final_norm_BatchNormalization_bn_b", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021145611535758f, .offset= -52}}}, .rank= 1, .dimensions=dimensions__decoder_final_norm_BatchNormalization_bn_b, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_decoder_final_norm_BatchNormalization_bn_b), .dataSize=BINLEN(_decoder_final_norm_BatchNormalization_bn_b)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Batchnorm node over {1,1152,512}: inputs are the residual-sum activation plus the two
// static bn_w/bn_b tensors registered above; output keeps the same rank-3 shape.
static ModelError_t addNode__decoder_final_norm_BatchNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_final_norm_BatchNormalization */
const char* inputs__decoder_final_norm_BatchNormalization[] = { "_decoder_convnext_9_Add_output_0", "_decoder_final_norm_BatchNormalization_bn_w", "_decoder_final_norm_BatchNormalization_bn_b" };
uint32_t dimensions__decoder_final_norm_BatchNormalization_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_final_norm_BatchNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_final_norm_BatchNormalization_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024023719597608f, .offset= -105}}}, .rank= 3, .dimensions=dimensions__decoder_final_norm_BatchNormalization_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_final_norm_BatchNormalization", // Node Name
  "qti.aisw", // Package Name
  "Batchnorm", // Qnn Node Type
  nullptr, // Node Params
  0, // Num Node Params
  inputs__decoder_final_norm_BatchNormalization, // Input Tensor Names
  3, // Num Input Tensor Names
  outputs__decoder_final_norm_BatchNormalization, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Pad node: {1,1152,512} -> {1,1154,512}; pad_amount is a {3,2} table {{0,0},{2,0},{0,0}}
// (2 leading elements on axis 1). NOTE(review): scheme=3 is a QNN pad-scheme code —
// confirm its semantics (constant/reflect/edge) against the QNN Pad op definition.
static ModelError_t addNode__decoder_head_layer1_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_head_layer1_Pad */
uint32_t dimensions__decoder_head_layer1_Pad_pad_amount[] = {3, 2};
uint32_t _decoder_head_layer1_Pad_pad_amount[] = {0, 0, 2, 0, 0, 0};
Qnn_Param_t params__decoder_head_layer1_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_head_layer1_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer1_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__decoder_head_layer1_Pad[] = { "_decoder_final_norm_BatchNormalization_output_0" };
uint32_t dimensions__decoder_head_layer1_Pad_output_0[] = {1, 1154, 512};
Qnn_Tensor_t outputs__decoder_head_layer1_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024023719597608f, .offset= -105}}}, .rank= 3, .dimensions=dimensions__decoder_head_layer1_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_head_layer1_Pad", // Node Name
  "qti.aisw", // Package Name
  "Pad", // Qnn Node Type
  params__decoder_head_layer1_Pad, // Node Params
  2, // Num Node Params
  inputs__decoder_head_layer1_Pad, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_head_layer1_Pad, // Output Tensors
  1// Num Output Tensors
  ), err);
return err;
}
// Transpose node permuting the padded tensor back to feature-major layout; the definition
// continues beyond this chunk.
static ModelError_t addNode__decoder_head_layer1_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _decoder_head_layer1_Pad_output_0_ncf */
uint32_t dimensions__decoder_head_layer1_Pad_output_0_ncf_perm[] = {3};
uint32_t _decoder_head_layer1_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__decoder_head_layer1_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer1_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer1_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}} }; const char* inputs__decoder_head_layer1_Pad_output_0_ncf[] = { "_decoder_head_layer1_Pad_output_0" }; uint32_t dimensions__decoder_head_layer1_Pad_output_0_ncf[] = {1, 512, 1154}; Qnn_Tensor_t outputs__decoder_head_layer1_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024023719597608f, .offset= -105}}}, .rank= 3, .dimensions=dimensions__decoder_head_layer1_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer1_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_head_layer1_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_head_layer1_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_layer1_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_layer1_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer1_net_Conv_reshape_to_2d */ const char* inputs__decoder_head_layer1_net_Conv_reshape_to_2d[] = { "_decoder_head_layer1_Pad_output_0_ncf" }; uint32_t dimensions__decoder_head_layer1_net_Conv_reshape_to_2d[] = {1, 512, 1, 1154}; Qnn_Tensor_t outputs__decoder_head_layer1_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_head_layer1_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024023719597608f, .offset= -105}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer1_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer1_net_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_head_layer1_net_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_layer1_net_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer1_net_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_head_layer1_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer1_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc[] = { "_decoder_head_layer1_net_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1154, 512}; Qnn_Tensor_t outputs__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024023719597608f, .offset= -105}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer1_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_head_layer1_net_weight(QnnModel& model){ 
ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_head_layer1_net_weight[] = {1, 3, 512, 2048}; VALIDATE(model.addTensor("tts_ae_decoder_head_layer1_net_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_head_layer1_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0094529697671533f, .offset= -113}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_head_layer1_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_head_layer1_net_weight), .dataSize=BINLEN(tts_ae_decoder_head_layer1_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_head_layer1_net_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_head_layer1_net_bias[] = {2048}; VALIDATE(model.addTensor("tts_ae_decoder_head_layer1_net_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_head_layer1_net_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010526899714023f, .offset= -255}}}, .rank= 1, .dimensions=dimensions_tts_ae_decoder_head_layer1_net_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_head_layer1_net_bias), .dataSize=BINLEN(tts_ae_decoder_head_layer1_net_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__decoder_head_layer1_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer1_net_Conv_2d */ uint32_t dimensions__decoder_head_layer1_net_Conv_2d_dilation[] = {2}; uint32_t _decoder_head_layer1_net_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__decoder_head_layer1_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _decoder_head_layer1_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__decoder_head_layer1_net_Conv_2d_stride[] = {2}; uint32_t _decoder_head_layer1_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__decoder_head_layer1_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer1_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer1_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, 
.dimensions=dimensions__decoder_head_layer1_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer1_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer1_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer1_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__decoder_head_layer1_net_Conv_2d[] = { "_decoder_head_layer1_net_Conv_reshape_to_2d_nhwc", "tts_ae_decoder_head_layer1_net_weight", "tts_ae_decoder_head_layer1_net_bias" }; uint32_t dimensions__decoder_head_layer1_net_Conv_intermediate[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_head_layer1_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073723765090108f, .offset= -119}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer1_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer1_net_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__decoder_head_layer1_net_Conv_2d, // Node Params 5, // Num Node Params inputs__decoder_head_layer1_net_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__decoder_head_layer1_net_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_layer1_net_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer1_net_Conv_intermediate_nchw */ uint32_t dimensions__decoder_head_layer1_net_Conv_intermediate_nchw_perm[] = {4}; uint32_t _decoder_head_layer1_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__decoder_head_layer1_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer1_net_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_decoder_head_layer1_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_head_layer1_net_Conv_intermediate_nchw[] = { "_decoder_head_layer1_net_Conv_intermediate" }; uint32_t dimensions__decoder_head_layer1_net_Conv_intermediate_nchw[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_head_layer1_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073723765090108f, .offset= -119}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer1_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer1_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_head_layer1_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__decoder_head_layer1_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_layer1_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_layer1_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer1_net_Conv_intermediate */ const char* 
inputs__decoder_head_layer1_net_Conv_intermediate[] = { "_decoder_head_layer1_net_Conv_intermediate_nchw" }; uint32_t dimensions__decoder_head_layer1_net_Conv_output_0[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__decoder_head_layer1_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073723765090108f, .offset= -119}}}, .rank= 3, .dimensions=dimensions__decoder_head_layer1_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer1_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_head_layer1_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_layer1_net_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_layer1_net_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer1_net_Conv_output_0_nfc */ uint32_t dimensions__decoder_head_layer1_net_Conv_output_0_nfc_perm[] = {3}; uint32_t _decoder_head_layer1_net_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_head_layer1_net_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_output_0_nfc_perm", 
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer1_net_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer1_net_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_head_layer1_net_Conv_output_0_nfc[] = { "_decoder_head_layer1_net_Conv_output_0" }; uint32_t dimensions__decoder_head_layer1_net_Conv_output_0_nfc[] = {1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_head_layer1_net_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer1_net_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073723765090108f, .offset= -119}}}, .rank= 3, .dimensions=dimensions__decoder_head_layer1_net_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer1_net_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_head_layer1_net_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__decoder_head_layer1_net_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__decoder_head_layer1_net_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers the static tensor "onnx__PRelu_1505": the learned PRelu slope
 * parameter consumed as the second input of node _decoder_head_act_PRelu
 * below. It is a rank-1, single-element uint8 tensor whose payload is linked
 * in from the model binary blob (BINVARSTART/BINLEN). With scale
 * ~7.63e-7 and offset 0, the dequantized slope is at most ~1.9e-4
 * (real = scale * (stored + offset) per QNN scale-offset encoding), so the
 * activation presumably behaves almost like a plain ReLU -- TODO(review):
 * confirm against the source ONNX initializer. */
static ModelError_t addTensor_onnx__PRelu_1505(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__PRelu_1505[] = {1};
  VALIDATE(model.addTensor("onnx__PRelu_1505", // Tensor Name
                           (Qnn_Tensor_t) {
                               .version= QNN_TENSOR_VERSION_2,
                               {.v2= {
                                 .id=0,
                                 .name= "onnx__PRelu_1505",
                                 .type= QNN_TENSOR_TYPE_STATIC,
                                 .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                                 .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                                 .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                    QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                    {.scaleOffsetEncoding= {.scale= 0.0000007630926575f, .offset= 0}}},
                                 .rank= 1,
                                 .dimensions=dimensions_onnx__PRelu_1505,
                                 .memType= QNN_TENSORMEMTYPE_RAW,
                                 // Slope bytes live in the accompanying binary segment, not in this TU.
                                 {.clientBuf= { .data=BINVARSTART(onnx__PRelu_1505),
                                                .dataSize=BINLEN(onnx__PRelu_1505)}},
                                 .isDynamicDimensions= nullptr,
                                 .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                  .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                                 .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds the "Prelu" op node _decoder_head_act_PRelu: applies the parametric
 * ReLU (slope tensor onnx__PRelu_1505 registered above) to the Conv output
 * "_decoder_head_layer1_net_Conv_output_0_nfc", producing the {1, 1152, 2048}
 * uint8 tensor "_decoder_head_act_PRelu_output_0". Output offset 0 means the
 * quantized output range starts at 0.0. */
static ModelError_t addNode__decoder_head_act_PRelu(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_head_act_PRelu */
  const char* inputs__decoder_head_act_PRelu[] = {
    "_decoder_head_layer1_net_Conv_output_0_nfc",
    "onnx__PRelu_1505"
  };
  uint32_t dimensions__decoder_head_act_PRelu_output_0[] = {1, 1152, 2048};
  Qnn_Tensor_t outputs__decoder_head_act_PRelu[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0,
          .name= "_decoder_head_act_PRelu_output_0",
          .type= QNN_TENSOR_TYPE_NATIVE,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
          .dataType= QNN_DATATYPE_UFIXED_POINT_8,
          .quantizeParams= { QNN_DEFINITION_DEFINED,
                             QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                             {.scaleOffsetEncoding= {.scale= 0.0039426796138287f, .offset= 0}}},
          .rank= 3,
          .dimensions=dimensions__decoder_head_act_PRelu_output_0,
          .memType= QNN_TENSORMEMTYPE_RAW,
          // Intermediate (NATIVE) tensor: backend allocates the buffer; no client data.
          {.clientBuf= { .data=nullptr, .dataSize=0}},
          .isDynamicDimensions= nullptr,
          .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_act_PRelu", // Node Name "qti.aisw", // Package Name "Prelu", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_head_act_PRelu, // Input Tensor Names 2, // Num Input Tensor Names outputs__decoder_head_act_PRelu, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_act_PRelu_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_act_PRelu_output_0_ncf */ uint32_t dimensions__decoder_head_act_PRelu_output_0_ncf_perm[] = {3}; uint32_t _decoder_head_act_PRelu_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__decoder_head_act_PRelu_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_act_PRelu_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_act_PRelu_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_act_PRelu_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_head_act_PRelu_output_0_ncf[] = { "_decoder_head_act_PRelu_output_0" }; uint32_t dimensions__decoder_head_act_PRelu_output_0_ncf[] = {1, 2048, 1152}; Qnn_Tensor_t outputs__decoder_head_act_PRelu_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_decoder_head_act_PRelu_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039426796138287f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__decoder_head_act_PRelu_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_act_PRelu_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_head_act_PRelu_output_0_ncf, // Node Params 1, // Num Node Params inputs__decoder_head_act_PRelu_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_act_PRelu_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_layer2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer2_Conv_reshape_to_2d */ const char* inputs__decoder_head_layer2_Conv_reshape_to_2d[] = { "_decoder_head_act_PRelu_output_0_ncf" }; uint32_t dimensions__decoder_head_layer2_Conv_reshape_to_2d[] = {1, 2048, 1, 1152}; Qnn_Tensor_t outputs__decoder_head_layer2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039426796138287f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer2_Conv_reshape_to_2d, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__decoder_head_layer2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_layer2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__decoder_head_layer2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _decoder_head_layer2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__decoder_head_layer2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _decoder_head_layer2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__decoder_head_layer2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__decoder_head_layer2_Conv_reshape_to_2d_nhwc[] = { 
"_decoder_head_layer2_Conv_reshape_to_2d" }; uint32_t dimensions__decoder_head_layer2_Conv_reshape_to_2d_nhwc[] = {1, 1, 1152, 2048}; Qnn_Tensor_t outputs__decoder_head_layer2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039426796138287f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_decoder_head_layer2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__decoder_head_layer2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__decoder_head_layer2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__decoder_head_layer2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ae_decoder_head_layer2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ae_decoder_head_layer2_weight[] = {1, 1, 2048, 512}; VALIDATE(model.addTensor("tts_ae_decoder_head_layer2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ae_decoder_head_layer2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
/* Continuation of addTensor_tts_ae_decoder_head_layer2_weight: quantization
 * encoding for the static weight, with payload taken from the linked data blob. */
{.scaleOffsetEncoding= {.scale= 0.0096247438341379f, .offset= -115}}}, .rank= 4, .dimensions=dimensions_tts_ae_decoder_head_layer2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ae_decoder_head_layer2_weight), .dataSize=BINLEN(tts_ae_decoder_head_layer2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Adds the "Conv2d" node _decoder_head_layer2_Conv_2d: a 1x1 convolution over
 * the NHWC activation (1x1x1152x2048) with the static weight tensor, yielding
 * _decoder_head_layer2_Conv_intermediate (1x1x1152x512).
 * Parameters: dilation {1,1}, pad_amount {0,0,0,0}, stride {1,1}, group=1,
 * reuse_sparse_indices=false. No bias input is wired here. */
static ModelError_t addNode__decoder_head_layer2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_head_layer2_Conv_2d */
  // Host-side storage for the three static tensor parameters below.
  uint32_t dimensions__decoder_head_layer2_Conv_2d_dilation[] = {2};
  uint32_t _decoder_head_layer2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__decoder_head_layer2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _decoder_head_layer2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__decoder_head_layer2_Conv_2d_stride[] = {2};
  uint32_t _decoder_head_layer2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__decoder_head_layer2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount", // 2x2 before/after padding per spatial axis (all zero)
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__decoder_head_layer2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_layer2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Inputs: the NHWC activation and the static weight registered above.
  const char* inputs__decoder_head_layer2_Conv_2d[] = {
    "_decoder_head_layer2_Conv_reshape_to_2d_nhwc",
    "tts_ae_decoder_head_layer2_weight"
  };
  uint32_t dimensions__decoder_head_layer2_Conv_intermediate[] = {1, 1, 1152, 512};
  Qnn_Tensor_t outputs__decoder_head_layer2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061872061342001f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_head_layer2_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "Conv2d", // Qnn Node Type
    params__decoder_head_layer2_Conv_2d, // Node Params
    5, // Num Node Params
    inputs__decoder_head_layer2_Conv_2d, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__decoder_head_layer2_Conv_2d, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds the inverse layout-permute _decoder_head_layer2_Conv_intermediate_nchw:
 * Transpose with perm {0,3,1,2} (per the node name: back to NCHW).
 * Definition continues on the next source line. */
static ModelError_t addNode__decoder_head_layer2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_head_layer2_Conv_intermediate_nchw */
  uint32_t dimensions__decoder_head_layer2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _decoder_head_layer2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__decoder_head_layer2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
/* Continuation of addNode__decoder_head_layer2_Conv_intermediate_nchw:
 * finish the "perm" parameter, then register the Transpose producing the
 * NCHW-shaped tensor 1x512x1x1152 (quantization carried over unchanged). */
.dimensions=dimensions__decoder_head_layer2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_layer2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_head_layer2_Conv_intermediate_nchw[] = {
  "_decoder_head_layer2_Conv_intermediate"
};
uint32_t dimensions__decoder_head_layer2_Conv_intermediate_nchw[] = {1, 512, 1, 1152};
Qnn_Tensor_t outputs__decoder_head_layer2_Conv_intermediate_nchw[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061872061342001f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__decoder_head_layer2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_head_layer2_Conv_intermediate_nchw", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_head_layer2_Conv_intermediate_nchw, // Node Params
  1, // Num Node Params
  inputs__decoder_head_layer2_Conv_intermediate_nchw, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_head_layer2_Conv_intermediate_nchw, // Output Tensors
  1// Num Output Tensors
), err);
return err;
}

/* Adds the "Reshape" node _decoder_head_layer2_Conv_intermediate: drops the
 * unit spatial axis, 1x512x1x1152 -> 1x512x1152, producing the original ONNX
 * Conv output tensor _decoder_head_layer2_Conv_output_0 (same quantization). */
static ModelError_t addNode__decoder_head_layer2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_head_layer2_Conv_intermediate */
  const char* inputs__decoder_head_layer2_Conv_intermediate[] = {
    "_decoder_head_layer2_Conv_intermediate_nchw"
  };
  uint32_t dimensions__decoder_head_layer2_Conv_output_0[] = {1, 512, 1152};
  Qnn_Tensor_t outputs__decoder_head_layer2_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_layer2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061872061342001f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__decoder_head_layer2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_head_layer2_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__decoder_head_layer2_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_head_layer2_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds the rank-3 "Transpose" node _decoder_head_Transpose with perm {0,2,1}
 * (swap channel and frame axes). Definition continues on the next source line. */
static ModelError_t addNode__decoder_head_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_head_Transpose */
  uint32_t dimensions__decoder_head_Transpose_perm[] = {3};
  uint32_t _decoder_head_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__decoder_head_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
/* Continuation of addNode__decoder_head_Transpose: finish the "perm" parameter
 * (3 uint32 values, 12 bytes) and register the Transpose producing
 * _decoder_head_Transpose_output_0 (1x1152x512, quantization unchanged). */
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__decoder_head_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_decoder_head_Transpose_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__decoder_head_Transpose[] = {
  "_decoder_head_layer2_Conv_output_0"
};
uint32_t dimensions__decoder_head_Transpose_output_0[] = {1, 1152, 512};
Qnn_Tensor_t outputs__decoder_head_Transpose[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_decoder_head_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061872061342001f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__decoder_head_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
  "_decoder_head_Transpose", // Node Name
  "qti.aisw", // Package Name
  "Transpose", // Qnn Node Type
  params__decoder_head_Transpose, // Node Params
  1, // Num Node Params
  inputs__decoder_head_Transpose, // Input Tensor Names
  1, // Num Input Tensor Names
  outputs__decoder_head_Transpose, // Output Tensors
  1// Num Output Tensors
), err);
return err;
}

/* Adds the final "Reshape" node _decoder_head_Reshape: flattens 1x1152x512 into
 * the graph output "wav_tts" (1x589824, uint8, QNN_TENSOR_TYPE_APP_READ — the
 * tensor the application reads back; 1152*512 == 589824). */
static ModelError_t addNode__decoder_head_Reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _decoder_head_Reshape */
  const char* inputs__decoder_head_Reshape[] = {
    "_decoder_head_Transpose_output_0"
  };
  uint32_t dimensions_wav_tts[] = {1, 589824};
  Qnn_Tensor_t outputs__decoder_head_Reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "wav_tts", .type= QNN_TENSOR_TYPE_APP_READ, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0061872061342001f, .offset= -125}}}, .rank= 2, .dimensions=dimensions_wav_tts, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_decoder_head_Reshape", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__decoder_head_Reshape, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__decoder_head_Reshape, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Entry point called by the QNN runtime wrapper: builds the "vocoder_htp" graph
 * by invoking every addTensor_*/addNode_* helper in topological order.
 * Continues past this source view. */
QNN_API ModelError_t QnnModel_composeGraphs(Qnn_BackendHandle_t backendHandle,
  QNN_INTERFACE_VER_TYPE interface,
  Qnn_ContextHandle_t contextHandle,
  const GraphConfigInfo_t** graphsConfigInfo,
  const uint32_t numGraphsConfigInfo,
  GraphInfoPtr_t** graphsInfo,
  uint32_t* numGraphsInfo,
  bool debug,
  QnnLog_Callback_t logCallback,
  QnnLog_Level_t maxLogLevel) {
  ModelError_t err = MODEL_NO_ERROR;
  /* model/graph for vocoder_htp*/
  QnnModel vocoder_htp;
  const QnnGraph_Config_t** graphConfigs = nullptr;
  VALIDATE(getQnnGraphConfigFromInfo("vocoder_htp", graphsConfigInfo, numGraphsConfigInfo, graphConfigs), err);
  VALIDATE(vocoder_htp.initialize(backendHandle, interface, contextHandle, "vocoder_htp", debug, DO_GRAPH_NODE_VALIDATIONS, graphConfigs), err);
  VALIDATE(addTensor_latent(vocoder_htp), err);
VALIDATE(addTensor_tts_ttl_normalizer_scale(vocoder_htp), err); VALIDATE(addNode__Div(vocoder_htp), err); VALIDATE(addNode__Div_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__Reshape(vocoder_htp), err); VALIDATE(addNode__Transpose(vocoder_htp), err); VALIDATE(addNode__Reshape_1(vocoder_htp), err); VALIDATE(addTensor_tts_ae_latent_std(vocoder_htp), err); VALIDATE(addNode__Mul(vocoder_htp), err); VALIDATE(addTensor_tts_ae_latent_mean(vocoder_htp), err); VALIDATE(addNode__Add(vocoder_htp), err); VALIDATE(addNode__Add_output_0_nfc(vocoder_htp), err); VALIDATE(addNode__decoder_embed_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_embed_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_embed_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_embed_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_onnx__Conv_1440(vocoder_htp), err); VALIDATE(addTensor_onnx__Conv_1441(vocoder_htp), err); VALIDATE(addNode__decoder_embed_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_embed_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_embed_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_embed_net_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_dwconv_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_dwconv_net_Conv_intermediate(vocoder_htp), err); 
VALIDATE(addNode__decoder_convnext_0_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_pwconv1_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_0(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_pwconv2_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_0_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_0_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_dwconv_Pad(vocoder_htp), err); 
VALIDATE(addNode__decoder_convnext_1_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_dwconv_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_pwconv1_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_2(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_pwconv2_weight(vocoder_htp), err); 
VALIDATE(addTensor_tts_ae_decoder_convnext_1_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_1_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_1_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_dwconv_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_dwconv_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_pwconv1_weight(vocoder_htp), 
err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_4(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_pwconv2_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_2_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_2_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_dwconv_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_dwconv_net_Conv_intermediate(vocoder_htp), err); 
VALIDATE(addNode__decoder_convnext_3_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_pwconv1_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_6(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_pwconv2_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_3_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_3_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_dwconv_Pad(vocoder_htp), err); 
VALIDATE(addNode__decoder_convnext_4_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_dwconv_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_pwconv1_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_8(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_pwconv2_weight(vocoder_htp), err); 
VALIDATE(addTensor_tts_ae_decoder_convnext_4_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_4_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_4_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_dwconv_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_dwconv_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_pwconv1_weight(vocoder_htp), 
err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_10(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_pwconv2_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_5_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_5_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_dwconv_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_dwconv_net_Conv_intermediate(vocoder_htp), err); 
VALIDATE(addNode__decoder_convnext_6_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_pwconv1_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_12(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_pwconv2_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_6_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_6_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_dwconv_Pad(vocoder_htp), err); 
VALIDATE(addNode__decoder_convnext_7_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_dwconv_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_pwconv1_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_14(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_pwconv2_weight(vocoder_htp), err); 
VALIDATE(addTensor_tts_ae_decoder_convnext_7_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_7_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_7_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_dwconv_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_dwconv_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_pwconv1_weight(vocoder_htp), 
err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_16(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_pwconv2_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_8_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_8_Add(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_dwconv_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_dwconv_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_dwconv_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_dwconv_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_dwconv_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_dwconv_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_dwconv_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_dwconv_net_Conv_intermediate(vocoder_htp), err); 
VALIDATE(addNode__decoder_convnext_9_norm_Transpose(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_norm_norm_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_norm_norm_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_norm_norm_LayerNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_norm_Transpose_1_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv1_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv1_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_pwconv1_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_pwconv1_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv1_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv1_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv1_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__elementwiseneuron_18(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_pwconv2_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_pwconv2_bias(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_pwconv2_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_convnext_9_gamma(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_Mul(vocoder_htp), err); VALIDATE(addNode__decoder_convnext_9_Add(vocoder_htp), err); VALIDATE(addTensor__decoder_final_norm_BatchNormalization_bn_w(vocoder_htp), err); 
VALIDATE(addTensor__decoder_final_norm_BatchNormalization_bn_b(vocoder_htp), err); VALIDATE(addNode__decoder_final_norm_BatchNormalization(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_Pad(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_Pad_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_net_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_net_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_head_layer1_net_weight(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_head_layer1_net_bias(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_net_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_net_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_net_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer1_net_Conv_output_0_nfc(vocoder_htp), err); VALIDATE(addTensor_onnx__PRelu_1505(vocoder_htp), err); VALIDATE(addNode__decoder_head_act_PRelu(vocoder_htp), err); VALIDATE(addNode__decoder_head_act_PRelu_output_0_ncf(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer2_Conv_reshape_to_2d(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer2_Conv_reshape_to_2d_nhwc(vocoder_htp), err); VALIDATE(addTensor_tts_ae_decoder_head_layer2_weight(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer2_Conv_2d(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer2_Conv_intermediate_nchw(vocoder_htp), err); VALIDATE(addNode__decoder_head_layer2_Conv_intermediate(vocoder_htp), err); VALIDATE(addNode__decoder_head_Transpose(vocoder_htp), err); VALIDATE(addNode__decoder_head_Reshape(vocoder_htp), err); // Add all models to array to get graphsInfo QnnModel* models [] = {&vocoder_htp}; uint32_t numModels = 1; // Populate the constructed graphs in provided output variables VALIDATE(getGraphInfoFromModels(*models, numModels, graphsInfo), err); *numGraphsInfo = numModels; return err; } // PREPARE_GRAPHS 
QNN_API ModelError_t QnnModel_freeGraphsInfo(GraphInfoPtr_t** graphsInfo, uint32_t numGraphsInfo){ return qnn_wrapper_api::freeGraphsInfo(graphsInfo, numGraphsInfo); } // FREEGRAPHINFO }