/* COPYRIGHT HEADER GOES HERE: No CopyRight Header String Passed During Model Conversion */ /* Command Line used: qnn-onnx-converter; act_bitwidth=8; act_quantizer=tf; act_quantizer_calibration=min-max; act_quantizer_schema=asymmetric; adjust_nms_features_dims=True; algorithms=[]; align_matmul_ranks=True; apply_masked_softmax=uncompressed; arch_checker=False; backend=None; batch=None; bias_bitwidth=8; calc_static_encodings=False; converter_op_package_lib=; copyright_file=None; custom_io=; custom_op_config_paths=None; debug=-1; defer_loading=False; define_symbol=None; disable_batchnorm_folding=False; disable_defer_loading=False; disable_node_validation=False; disable_qnn_op_config_validation=False; disable_relu_squashing=False; dry_run=None; dumpIR=False; dump_custom_io_config_template=; dump_encoding_json=False; dump_inferred_model=False; dump_qairt_io_config_yaml=; dump_qairt_quantizer_command=None; dump_value_info=False; enable_framework_trace=False; enable_match_gathernd=False; enable_match_topk=False; enable_per_row_quantized_bias=False; exclude_named_tensors=False; expand_gru_op_structure=True; expand_lstm_op_structure=False; expand_sparse_op_structure=False; export_format=cpp; extract_color_transform=True; float_bias_bitwidth=0; float_bias_bw=0; float_bitwidth=32; float_bw=32; float_fallback=False; force_prune_cast_ops=False; handle_gather_negative_indices=True; ignore_encodings=False; include_data_invariant_ops=False; inject_cast_for_gather=True; input_dim=[['noisy_latent', '1,144,192'], ['text_emb', '1,256,128'], ['style_ttl', '1,50,256'], ['latent_mask', '1,1,192'], ['text_mask', '1,1,128'], ['current_step', '1'], ['total_step', '1']]; input_dtype=[]; input_encoding=[]; input_layout=[]; input_list=./calibration_data/vector_estimator_input_list.txt; input_type=[]; keep_disconnected_nodes=False; keep_int64_inputs=False; keep_quant_nodes=False; keep_weights_quantized=False; match_caffe_ssd_to_tf=True; model_version=None; multi_time_steps_gru=False; 
multi_time_steps_lstm=False; no_simplification=False; op_package_lib=; out_names=['denoised_latent']; overwrite_model_prefix=False; pack_4_bit_weights=False; package_name=None; packed_masked_softmax_inputs=[]; packed_max_seq=1; param_quantizer=tf; param_quantizer_calibration=min-max; param_quantizer_schema=asymmetric; percentile_calibration_value=99.99; perform_axes_to_spatial_first_order=True; perform_layout_transformation=False; prepare_inputs_as_params=False; preprocess_roi_pool_inputs=True; preserve_io=[]; preserve_onnx_output_order=False; quantization_overrides=; quantizer_log=None; restrict_quantization_steps=[]; squash_box_decoder=True; unroll_gru_time_steps=True; unroll_lstm_time_steps=True; use_aimet_quantizer=False; use_convert_quantization_nodes=False; use_dynamic_16_bit_weights=False; use_native_dtype=False; use_native_input_files=False; use_native_output_files=False; use_per_channel_quantization=False; use_per_row_quantization=False; use_quantize_v2=False; validate_models=False; weights_bitwidth=8 */
#include "QnnOpDef.h"
#include "QnnModel.hpp"

// Flag to determine if Backend should do node validation for each opNode added
#define DO_GRAPH_NODE_VALIDATIONS 1

using namespace qnn_wrapper_api;

// Converter SDK version string, exported with default visibility from the generated library.
const __attribute__((visibility("default"))) char* QNN_SDK_VERSION = "qaisw-v2.37.1.250807093845_124904";

extern "C" {

// NOTE(review): this file is generated by qnn-onnx-converter (see command-line header above);
// edits here are normally overwritten on the next conversion.

// Graph input "noisy_latent": uint8 asymmetric-quantized, shape [1, 192, 144]
// (already channel-first; the original ONNX input was declared 1,144,192 per the header).
static ModelError_t addTensor_noisy_latent(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_noisy_latent[] = {1, 192, 144};
  VALIDATE(model.addTensor("noisy_latent", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "noisy_latent",
                               .type= QNN_TENSOR_TYPE_APP_WRITE, // written by the application (graph input)
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0336016789078712f, .offset= -126}}},
                               .rank= 3,
                               .dimensions=dimensions_noisy_latent,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=nullptr, .dataSize=0}}, // buffer supplied by the app at execute time
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Graph input "text_emb": uint8 asymmetric-quantized, shape [1, 128, 256].
static ModelError_t addTensor_text_emb(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_text_emb[] = {1, 128, 256};
  VALIDATE(model.addTensor("text_emb", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "text_emb",
                               .type= QNN_TENSOR_TYPE_APP_WRITE,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}},
                               .rank= 3,
                               .dimensions=dimensions_text_emb,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=nullptr, .dataSize=0}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Graph input "style_ttl": uint8 asymmetric-quantized, shape [1, 256, 50].
static ModelError_t addTensor_style_ttl(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_style_ttl[] = {1, 256, 50};
  VALIDATE(model.addTensor("style_ttl", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "style_ttl",
                               .type= QNN_TENSOR_TYPE_APP_WRITE,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
                               .rank= 3,
                               .dimensions=dimensions_style_ttl,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=nullptr, .dataSize=0}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Graph input "latent_mask": uint8, shape [1, 192, 1]; scale 1/255 with zero offset
// (encodes values in [0, 1], i.e. a mask).
static ModelError_t addTensor_latent_mask(QnnModel& model){
  ModelError_t err =
  MODEL_NO_ERROR;
  uint32_t dimensions_latent_mask[] = {1, 192, 1};
  VALIDATE(model.addTensor("latent_mask", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "latent_mask",
                               .type= QNN_TENSOR_TYPE_APP_WRITE,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}},
                               .rank= 3,
                               .dimensions=dimensions_latent_mask,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=nullptr, .dataSize=0}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Graph input "text_mask": uint8, shape [1, 128, 1]; same 1/255 mask encoding as latent_mask.
static ModelError_t addTensor_text_mask(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_text_mask[] = {1, 128, 1};
  VALIDATE(model.addTensor("text_mask", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "text_mask",
                               .type= QNN_TENSOR_TYPE_APP_WRITE,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}},
                               .rank= 3,
                               .dimensions=dimensions_text_mask,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=nullptr, .dataSize=0}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Graph input "current_step": scalar-like uint8 tensor of shape [1];
// very small scale (~3.92e-7) so the representable range is ~[0, 1e-4].
static ModelError_t addTensor_current_step(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_current_step[] = {1};
  VALIDATE(model.addTensor("current_step", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "current_step",
                               .type= QNN_TENSOR_TYPE_APP_WRITE,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}},
                               .rank= 1,
                               .dimensions=dimensions_current_step,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=nullptr, .dataSize=0}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Graph input "total_step": scalar-like uint8 tensor of shape [1]; scale ~0.0392 → range ~[0, 10].
static ModelError_t addTensor_total_step(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_total_step[] = {1};
  VALIDATE(model.addTensor("total_step", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "total_step",
                               .type= QNN_TENSOR_TYPE_APP_WRITE,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0392156876623631f, .offset= 0}}},
                               .rank= 1,
                               .dimensions=dimensions_total_step,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=nullptr, .dataSize=0}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Transpose node "noisy_latent_ncf": permutes noisy_latent [1, 192, 144] -> [1, 144, 192]
// with perm {0, 2, 1}; output keeps the input's quantization encoding.
static ModelError_t addNode_noisy_latent_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR noisy_latent_ncf */
  uint32_t dimensions_noisy_latent_ncf_perm[] = {3};
  uint32_t noisy_latent_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params_noisy_latent_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "noisy_latent_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC, // constant param tensor
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding=
{.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions_noisy_latent_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)noisy_latent_ncf_perm, .dataSize=12}}, // 3 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs_noisy_latent_ncf[] = {
    "noisy_latent"
  };
  uint32_t dimensions_noisy_latent_ncf[] = {1, 144, 192};
  Qnn_Tensor_t outputs_noisy_latent_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "noisy_latent_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE, // internal graph tensor
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0336016789078712f, .offset= -126}}}, // same encoding as "noisy_latent"
        .rank= 3,
        .dimensions=dimensions_noisy_latent_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "noisy_latent_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params_noisy_latent_ncf, // Node Params
                         1, // Num Node Params
                         inputs_noisy_latent_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs_noisy_latent_ncf, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Transpose node "style_ttl_ncf": permutes style_ttl [1, 256, 50] -> [1, 50, 256]
// with perm {0, 2, 1}; output keeps the input's quantization encoding.
static ModelError_t addNode_style_ttl_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR style_ttl_ncf */
  uint32_t dimensions_style_ttl_ncf_perm[] = {3};
  uint32_t style_ttl_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params_style_ttl_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "style_ttl_ncf_perm",
         .type=
         QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions_style_ttl_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)style_ttl_ncf_perm, .dataSize=12}}, // 3 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs_style_ttl_ncf[] = {
    "style_ttl"
  };
  uint32_t dimensions_style_ttl_ncf[] = {1, 50, 256};
  Qnn_Tensor_t outputs_style_ttl_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "style_ttl_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}}, // same encoding as "style_ttl"
        .rank= 3,
        .dimensions=dimensions_style_ttl_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "style_ttl_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params_style_ttl_ncf, // Node Params
                         1, // Num Node Params
                         inputs_style_ttl_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs_style_ttl_ncf, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Reshape node: drops the batch dim of style_ttl_ncf [1, 50, 256] -> [50, 256]
// ahead of attention block 23's value-projection MatMul.
static ModelError_t addNode__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape */
  const char*
  inputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape[] = {
    "style_ttl_ncf"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape[] = {50, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Same pre-MatMul reshape of style_ttl_ncf ([1, 50, 256] -> [50, 256]) for attention block 17.
static ModelError_t addNode__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape */
  const char* inputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape[] = {
    "style_ttl_ncf"
  };
  uint32_t
  dimensions__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape[] = {50, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Same pre-MatMul reshape of style_ttl_ncf ([1, 50, 256] -> [50, 256]) for attention block 11.
static ModelError_t addNode__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape */
  const char* inputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape[] = {
    "style_ttl_ncf"
  };
  uint32_t dimensions__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape[] = {50, 256};
  Qnn_Tensor_t
  outputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Same pre-MatMul reshape of style_ttl_ncf ([1, 50, 256] -> [50, 256]) for attention block 5.
static ModelError_t addNode__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape */
  const char* inputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape[] = {
    "style_ttl_ncf"
  };
  uint32_t dimensions__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape[] = {50, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) {
      .version=
QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0067036072723567f, .offset= -148}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Reshape node: inserts a height-1 axis, noisy_latent_ncf [1, 144, 192] -> [1, 144, 1, 192],
// so the projection can be executed as a Conv2d below.
static ModelError_t addNode__vector_field_proj_in_net_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_in_net_Conv_reshape_to_2d */
  const char* inputs__vector_field_proj_in_net_Conv_reshape_to_2d[] = {
    "noisy_latent_ncf"
  };
  uint32_t dimensions__vector_field_proj_in_net_Conv_reshape_to_2d[] = {1, 144, 1, 192};
  Qnn_Tensor_t outputs__vector_field_proj_in_net_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_in_net_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0336016789078712f, .offset= -126}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_proj_in_net_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_proj_in_net_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_proj_in_net_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_proj_in_net_Conv_reshape_to_2d, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Transpose node: NCHW -> NHWC, [1, 144, 1, 192] -> [1, 1, 192, 144] (perm {0, 2, 3, 1}),
// the layout the Conv2d node below consumes.
static ModelError_t addNode__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_in_net_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_proj_in_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_proj_in_net_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_proj_in_net_Conv_reshape_to_2d_nhwc_perm,
                        .dataSize=16}}, // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_proj_in_net_Conv_reshape_to_2d"
  };
  uint32_t dimensions__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 144};
  Qnn_Tensor_t outputs__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_in_net_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0336016789078712f, .offset= -126}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_proj_in_net_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Static quantized weight tensor for the proj_in conv, shape [1, 1, 144, 512];
// data comes from the binary weights blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_proj_in_net_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_proj_in_net_weight[] = {1, 1, 144, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_proj_in_net_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_vector_field_proj_in_net_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0025373627431691f, .offset= -123}}},
                               .rank= 4,
                               .dimensions=dimensions_tts_ttl_vector_field_proj_in_net_weight,
                               .memType=
                               QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_proj_in_net_weight),
                                              .dataSize=BINLEN(tts_ttl_vector_field_proj_in_net_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Conv2d node "_vector_field_proj_in_net_Conv_2d": 1x1 filter, stride {1,1}, dilation {1,1},
// zero padding, group=1; NHWC input [1, 1, 192, 144] with weight [1, 1, 144, 512]
// produces [1, 1, 192, 512].
static ModelError_t addNode__vector_field_proj_in_net_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_in_net_Conv_2d */
  uint32_t dimensions__vector_field_proj_in_net_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_proj_in_net_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_proj_in_net_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_proj_in_net_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_proj_in_net_Conv_2d_stride[] = {2};
  uint32_t _vector_field_proj_in_net_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_proj_in_net_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_proj_in_net_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_proj_in_net_Conv_2d_dilation,
         .memType=
         QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_proj_in_net_Conv_2d_dilation, .dataSize=8}}, // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_proj_in_net_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2, // 2x2: (before, after) for each spatial axis
         .dimensions=dimensions__vector_field_proj_in_net_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_proj_in_net_Conv_2d_pad_amount, .dataSize=16}}, // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_proj_in_net_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_proj_in_net_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_proj_in_net_Conv_2d_stride, .dataSize=8}}, // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32,
                                    {.uint32Value = 1}}}}, // ordinary (non-grouped) convolution
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__vector_field_proj_in_net_Conv_2d[] = {
    "_vector_field_proj_in_net_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_proj_in_net_weight"
  };
  uint32_t dimensions__vector_field_proj_in_net_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_proj_in_net_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_in_net_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_proj_in_net_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_proj_in_net_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__vector_field_proj_in_net_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__vector_field_proj_in_net_Conv_2d, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_proj_in_net_Conv_2d, // Output Tensors
                         1 // Num Output Tensors
  ), err);
  return err;
}

// Transpose node: NHWC -> NCHW, Conv output [1, 1, 192, 512] -> [1, 512, 1, 192]
// (perm {0, 3, 1, 2}), undoing the layout change made for the Conv2d.
static ModelError_t addNode__vector_field_proj_in_net_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_in_net_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_proj_in_net_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_proj_in_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_proj_in_net_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_proj_in_net_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_proj_in_net_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_proj_in_net_Conv_intermediate_nchw_perm,
                        .dataSize=16}}, // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_proj_in_net_Conv_intermediate_nchw[] = {
    "_vector_field_proj_in_net_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_proj_in_net_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_proj_in_net_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_in_net_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_proj_in_net_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_proj_in_net_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_proj_in_net_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_proj_in_net_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_proj_in_net_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_proj_in_net_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_proj_in_net_Conv_intermediate */ const char* inputs__vector_field_proj_in_net_Conv_intermediate[] = { "_vector_field_proj_in_net_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_proj_in_net_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_proj_in_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_in_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_proj_in_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_proj_in_net_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_proj_in_net_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_proj_in_net_Conv_intermediate, // 
Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_proj_in_net_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_proj_in_net_Conv_output_0_nfc */ uint32_t dimensions__vector_field_proj_in_net_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_proj_in_net_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_proj_in_net_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_in_net_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_proj_in_net_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_proj_in_net_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_proj_in_net_Conv_output_0_nfc[] = { "_vector_field_proj_in_net_Conv_output_0" }; uint32_t dimensions__vector_field_proj_in_net_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_proj_in_net_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_in_net_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 3, 
.dimensions=dimensions__vector_field_proj_in_net_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_proj_in_net_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_proj_in_net_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_proj_in_net_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_proj_in_net_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t dimensions__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t dimensions__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t dimensions__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node 
Params inputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t dimensions__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t dimensions__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t dimensions__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t dimensions__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape[] = { "text_emb" }; uint32_t 
dimensions__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0588147193193436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__Reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _Reshape */ const char* inputs__Reshape[] = { "total_step" }; uint32_t dimensions__Reshape_output_0[] = {1, 1, 1}; Qnn_Tensor_t outputs__Reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0392156876623631f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_Reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__Reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__Reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__Reshape_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _Reshape_1 */ const char* inputs__Reshape_1[] = { "current_step" }; uint32_t dimensions__Reshape_1_output_0[] = {1, 1, 1}; Qnn_Tensor_t outputs__Reshape_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__Reshape_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_Reshape_1", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__Reshape_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__Reshape_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__vector_field_proj_in_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_proj_in_Mul */ Qnn_Param_t params__vector_field_proj_in_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_proj_in_Mul[] = { "_vector_field_proj_in_net_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_proj_in_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_proj_in_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_in_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_proj_in_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_proj_in_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_proj_in_Mul, // Node Params 1, // Num Node Params inputs__vector_field_proj_in_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_proj_in_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_ReduceSum(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_ReduceSum */ uint32_t dimensions__vector_field_main_blocks_3_attn_ReduceSum_axes[] = {2}; uint32_t 
_vector_field_main_blocks_3_attn_ReduceSum_axes[] = {1, 2}; Qnn_Param_t params__vector_field_main_blocks_3_attn_ReduceSum[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_ReduceSum_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_ReduceSum_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_ReduceSum_axes, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="keep_dims", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_3_attn_ReduceSum[] = { "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_3_attn_ReduceSum_output_0[] = {1}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_ReduceSum[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_ReduceSum_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.6980392336845398f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_ReduceSum_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_ReduceSum", // Node Name "qti.aisw", // Package Name "ReduceSum", // Qnn Node Type params__vector_field_main_blocks_3_attn_ReduceSum, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_3_attn_ReduceSum, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_ReduceSum, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_ReduceSum_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_ReduceSum_1 */ uint32_t dimensions__vector_field_main_blocks_3_attn_ReduceSum_1_axes[] = {2}; uint32_t _vector_field_main_blocks_3_attn_ReduceSum_1_axes[] = {1, 2}; Qnn_Param_t params__vector_field_main_blocks_3_attn_ReduceSum_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_ReduceSum_1_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_ReduceSum_1_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_ReduceSum_1_axes, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="keep_dims", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_3_attn_ReduceSum_1[] = { "text_mask" }; uint32_t 
dimensions__vector_field_main_blocks_3_attn_ReduceSum_1_output_0[] = {1}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_ReduceSum_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_ReduceSum_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.5019608139991760f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_ReduceSum_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_ReduceSum_1", // Node Name "qti.aisw", // Package Name "ReduceSum", // Qnn Node Type params__vector_field_main_blocks_3_attn_ReduceSum_1, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_3_attn_ReduceSum_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_ReduceSum_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_14(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_14 */ const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_14[] = { "text_mask" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_14_output_0[] = {1, 1, 128, 1}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Unsqueeze_14_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_14_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Unsqueeze_14", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_Unsqueeze_14, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Unsqueeze_14, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_15(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_15 */ const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_15[] = { "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_15_output_0[] = {1, 1, 192, 1}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_15[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Unsqueeze_15_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_15_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Unsqueeze_15", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_Unsqueeze_15, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Unsqueeze_15, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3118(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3118[] = {256, 256}; VALIDATE(model.addTensor("onnx__MatMul_3118", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3118", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046740062534809f, .offset= -133}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3118, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3118), .dataSize=BINLEN(onnx__MatMul_3118)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { 
// Tail of addTensor_tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias (head on
// the previous source line): static 256-entry uint8 bias, payload from the binary blob.
.id=0, .name= "tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002310128038516f, .offset= -117}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// blocks_5 attention W_value linear: ONNX MatMul+Add pair folded into one QNN FullyConnected
// (3 inputs: flattened activation, 256x256 weight, 256 bias; output {50,256} uint8).
static ModelError_t addNode__vector_field_main_blocks_5_attention_W_value_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_5_attention_W_value_linear_MatMul */
const char* inputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul[] = { "_vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3118", "tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias" };
uint32_t dimensions__vector_field_main_blocks_5_attention_W_value_linear_Add_output_0_fc[] = {50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032239241991192f, .offset= -132}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_5_attention_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_5_attention_W_value_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Restores the batch dim dropped for FullyConnected: {50,256} -> {1,50,256}. Same quant params
// as the _fc tensor (pure data-movement Reshape).
static ModelError_t addNode__vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape */
const char* inputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_5_attention_W_value_linear_Add_output_0_fc" };
uint32_t dimensions__vector_field_main_blocks_5_attention_W_value_linear_Add_output_0[] = {1, 50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032239241991192f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_attention_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Static 256x256 uint8 weight "onnx__MatMul_3163" from the binary blob (used below by the
// blocks_11 W_value linear FullyConnected).
static ModelError_t addTensor_onnx__MatMul_3163(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_onnx__MatMul_3163[] = {256, 256};
VALIDATE(model.addTensor("onnx__MatMul_3163", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3163", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035856727045029f, .offset= -140}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3163, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3163), .dataSize=BINLEN(onnx__MatMul_3163)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Head of addTensor_tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias
// (tensor registration continues on the next source line).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias[] = {256};
// Tail of addTensor_tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias:
// static 256-entry uint8 bias, payload from the binary blob.
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004108679713681f, .offset= -122}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// blocks_11 attention W_value linear: MatMul+Add folded into FullyConnected; output {50,256} uint8.
static ModelError_t addNode__vector_field_main_blocks_11_attention_W_value_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_11_attention_W_value_linear_MatMul */
const char* inputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul[] = { "_vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3163", "tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias" };
uint32_t dimensions__vector_field_main_blocks_11_attention_W_value_linear_Add_output_0_fc[] = {50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033338123466820f, .offset= -129}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_11_attention_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_11_attention_W_value_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Reshape {50,256} -> {1,50,256}; quant params carried over unchanged.
static ModelError_t addNode__vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape */
const char* inputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_11_attention_W_value_linear_Add_output_0_fc" };
uint32_t dimensions__vector_field_main_blocks_11_attention_W_value_linear_Add_output_0[] = {1, 50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033338123466820f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_attention_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Static 256x256 uint8 weight "onnx__MatMul_3208" from the binary blob (used below by the
// blocks_17 W_value linear FullyConnected).
static ModelError_t addTensor_onnx__MatMul_3208(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_onnx__MatMul_3208[] = {256, 256};
VALIDATE(model.addTensor("onnx__MatMul_3208", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3208", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040145115926862f, .offset= -131}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3208, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3208), .dataSize=BINLEN(onnx__MatMul_3208)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Head of addTensor_tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias
// (declaration continues on the next source line).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t
// Completes addTensor_tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias:
// static 256-entry uint8 bias tensor, payload from the binary blob.
dimensions_tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002631557290442f, .offset= -117}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// blocks_17 attention W_value linear: MatMul+Add folded into FullyConnected; output {50,256} uint8.
static ModelError_t addNode__vector_field_main_blocks_17_attention_W_value_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_17_attention_W_value_linear_MatMul */
const char* inputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul[] = { "_vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3208", "tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias" };
uint32_t dimensions__vector_field_main_blocks_17_attention_W_value_linear_Add_output_0_fc[] = {50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039237230084836f, .offset= -137}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_17_attention_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_17_attention_W_value_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Reshape {50,256} -> {1,50,256}; quant params carried over unchanged.
static ModelError_t addNode__vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape */
const char* inputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_17_attention_W_value_linear_Add_output_0_fc" };
uint32_t dimensions__vector_field_main_blocks_17_attention_W_value_linear_Add_output_0[] = {1, 50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039237230084836f, .offset= -137}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_attention_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Static 256x256 uint8 weight "onnx__MatMul_3253" from the binary blob (used below by the
// blocks_23 W_value linear FullyConnected).
static ModelError_t addTensor_onnx__MatMul_3253(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_onnx__MatMul_3253[] = {256, 256};
VALIDATE(model.addTensor("onnx__MatMul_3253", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3253", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044985488057137f, .offset= -135}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3253, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3253), .dataSize=BINLEN(onnx__MatMul_3253)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Next function (blocks_23 bias tensor) continues on the following source line.
static ModelError_t
// Completes addTensor_tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias
// (return type "static ModelError_t" sits at the end of the previous source line):
// static 256-entry uint8 bias tensor, payload from the binary blob.
addTensor_tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002318636397831f, .offset= -127}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// blocks_23 attention W_value linear: MatMul+Add folded into FullyConnected; output {50,256} uint8.
static ModelError_t addNode__vector_field_main_blocks_23_attention_W_value_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_23_attention_W_value_linear_MatMul */
const char* inputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul[] = { "_vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3253", "tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias" };
uint32_t dimensions__vector_field_main_blocks_23_attention_W_value_linear_Add_output_0_fc[] = {50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040565114468336f, .offset= -100}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_23_attention_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_23_attention_W_value_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Reshape {50,256} -> {1,50,256}; quant params carried over unchanged.
static ModelError_t addNode__vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape */
const char* inputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_23_attention_W_value_linear_Add_output_0_fc" };
uint32_t dimensions__vector_field_main_blocks_23_attention_W_value_linear_Add_output_0[] = {1, 50, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040565114468336f, .offset= -100}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_attention_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// ElementWiseBinary with scalar param "operation"=2 -- node derives from ONNX Div, so the
// uint32 value 2 presumably selects the divide opcode in QnnOpDef.h (TODO confirm against SDK).
// Output tensor literal continues on the next source line.
static ModelError_t addNode__Div(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _Div */
Qnn_Param_t params__Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__Div[] = { "_Reshape_1_output_0", "_Reshape_output_0" };
uint32_t dimensions__Div_output_0[] = {1, 1, 1};
Qnn_Tensor_t outputs__Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf=
// Tail of addNode__Div: finishes the {1,1,1} uint8 output tensor and adds the node.
{ .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_Div", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__Div, // Node Params
1, // Num Node Params
inputs__Div, // Input Tensor Names
2, // Num Input Tensor Names
outputs__Div, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Static scalar ({1}) uint8 tensor used as the numerator for the reciprocal below;
// value comes from the binary blob.
static ModelError_t addTensor__Reciprocal_coeff(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__Reciprocal_coeff[] = {1};
VALIDATE(model.addTensor("_Reciprocal_coeff", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_Reciprocal_coeff", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__Reciprocal_coeff, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_Reciprocal_coeff), .dataSize=BINLEN(_Reciprocal_coeff)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// ONNX Reciprocal lowered to ElementWiseBinary(_Reciprocal_coeff, _Reshape_output_0) with the
// same "operation"=2 scalar as _Div above -- i.e. coeff / x (presumably the divide opcode;
// TODO confirm against QnnOpDef.h).
static ModelError_t addNode__Reciprocal(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _Reciprocal */
Qnn_Param_t params__Reciprocal[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__Reciprocal[] = { "_Reciprocal_coeff", "_Reshape_output_0" };
uint32_t dimensions__Reciprocal_output_0[] = {1, 1, 1};
Qnn_Tensor_t outputs__Reciprocal[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_Reciprocal_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003921568568330f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__Reciprocal_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_Reciprocal", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__Reciprocal, // Node Params
1, // Num Node Params
inputs__Reciprocal, // Input Tensor Names
2, // Num Input Tensor Names
outputs__Reciprocal, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Layout-fixup Transpose inserted by the converter (axes-to-spatial-first pass): perm {0,2,1}
// on a {1,1,1} tensor, so numerically a no-op here. The perm is passed as a static uint32
// tensor param ("perm", 3 elements, dataSize 12 bytes = 3 * sizeof(uint32_t)).
static ModelError_t addNode__Reciprocal_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _Reciprocal_output_0_nfc */
uint32_t dimensions__Reciprocal_output_0_nfc_perm[] = {3};
uint32_t _Reciprocal_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__Reciprocal_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_Reciprocal_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__Reciprocal_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_Reciprocal_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__Reciprocal_output_0_nfc[] = { "_Reciprocal_output_0" };
uint32_t dimensions__Reciprocal_output_0_nfc[] = {1, 1, 1};
Qnn_Tensor_t outputs__Reciprocal_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_Reciprocal_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003921568568330f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__Reciprocal_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_Reciprocal_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__Reciprocal_output_0_nfc, // Node Params
1, // Num Node Params
inputs__Reciprocal_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__Reciprocal_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Applies "latent_mask" to the projected activations via ElementWiseBinary with scalar param
// "operation"=13 -- node derives from ONNX Mul, so 13 presumably selects the multiply opcode
// in QnnOpDef.h (TODO confirm against SDK). Output tensor literal continues on the next line.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_Mul */
Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_0_Mul[] = { "_vector_field_proj_in_Mul_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t
// ---- Auto-generated QNN graph construction (qnn-onnx-converter output); do not hand-edit logic. ----
// Tail of node "_vector_field_main_blocks_0_convnext_0_Mul" (definition begins in previous chunk):
// uint8 scale/offset-quantized output tensor, then the ElementWiseBinary addNode call.
outputs__vector_field_main_blocks_0_convnext_0_Mul[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_main_blocks_0_convnext_0_Mul", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__vector_field_main_blocks_0_convnext_0_Mul, // Node Params
                       1, // Num Node Params
                       inputs__vector_field_main_blocks_0_convnext_0_Mul, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__vector_field_main_blocks_0_convnext_0_Mul, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers static 256x256 uint8 weight tensor "onnx__MatMul_3102";
// payload is embedded in the model binary and referenced via BINVARSTART/BINLEN.
static ModelError_t addTensor_onnx__MatMul_3102(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3102[] = {256, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3102", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3102", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0067657181061804f, .offset= -125}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3102, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3102), .dataSize=BINLEN(onnx__MatMul_3102)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// Registers static 256-element uint8 bias tensor for main_blocks.3 attn W_key linear (data from model binary).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0063882479444146f, .offset= -103}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// FullyConnected node for main_blocks.3 attn W_key: inputs {activation, weight, bias} -> [128, 256] output.
static ModelError_t addNode__vector_field_main_blocks_3_attn_W_key_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_key_linear_MatMul */
  const char* inputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul[] = {
    "_vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape",
    "onnx__MatMul_3102",
    "tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_W_key_linear_Add_output_0_fc[] = {128, 256};
  Qnn_Tensor_t
  outputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_key_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_key_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_W_key_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Reshape node restoring the batch dim after the FullyConnected above: [128, 256] -> [1, 128, 256].
// (Definition continues in the next chunk.)
static ModelError_t addNode__vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_3_attn_W_key_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_W_key_linear_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
// (continued from previous chunk) output tensor + Reshape addNode for
// _vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape.
"_vector_field_main_blocks_3_attn_W_key_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_key_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers static 256x256 uint8 weight tensor "onnx__MatMul_3103" (data from model binary).
static ModelError_t addTensor_onnx__MatMul_3103(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3103[] = {256, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3103", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3103", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055587869137526f, .offset= -123}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3103, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3103), .dataSize=BINLEN(onnx__MatMul_3103)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// Registers static 256-element uint8 bias tensor for main_blocks.3 attn W_value linear (data from model binary).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006298169610091f, .offset= -162}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// FullyConnected node for main_blocks.3 attn W_value: inputs {activation, weight, bias} -> [128, 256] output.
static ModelError_t addNode__vector_field_main_blocks_3_attn_W_value_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_value_linear_MatMul */
  const char* inputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul[] = {
    "_vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape",
    "onnx__MatMul_3103",
    "tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_W_value_linear_Add_output_0_fc[] = {128, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_W_value_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Reshape node restoring the batch dim after the FullyConnected above: [128, 256] -> [1, 128, 256].
// (Definition continues in the next chunk.)
static ModelError_t addNode__vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_3_attn_W_value_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_W_value_linear_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
// (continued from previous chunk) rest of the output tensor + Reshape addNode for
// _vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape.
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Reshape node: ReduceSum output -> [1, 1, 1].
static ModelError_t addNode__vector_field_main_blocks_3_attn_Reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Reshape */
  const char* inputs__vector_field_main_blocks_3_attn_Reshape[] = {
    "_vector_field_main_blocks_3_attn_ReduceSum_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Reshape_output_0[] = {1, 1, 1};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.6980392336845398f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Reshape, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Reshape node: ReduceSum_1 output -> [1, 1, 1].
static ModelError_t addNode__vector_field_main_blocks_3_attn_Reshape_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Reshape_1 */
  const char* inputs__vector_field_main_blocks_3_attn_Reshape_1[] = {
    "_vector_field_main_blocks_3_attn_ReduceSum_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Reshape_1_output_0[] = {1, 1, 1};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Reshape_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.5019608139991760f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Reshape_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Reshape_1", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Reshape_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Reshape_1, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Transpose node with static perm = {0, 1, 3, 2} (swaps last two axes); output shape [1, 1, 1, 128].
// The perm tensor's clientBuf points at the local uint32 array (4 * 4 bytes = dataSize 16).
// (Definition continues in the next chunk.)
static ModelError_t addNode__vector_field_main_blocks_3_attn_Transpose_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Transpose_1 */
  uint32_t dimensions__vector_field_main_blocks_3_attn_Transpose_1_perm[] = {4};
  uint32_t _vector_field_main_blocks_3_attn_Transpose_1_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Transpose_1[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Transpose_1_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Transpose_1_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Transpose_1_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Transpose_1[] = {
    "_vector_field_main_blocks_3_attn_Unsqueeze_14_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Transpose_1_output_0[] = {1, 1, 1, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Transpose_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
// (continued from previous chunk) output tensor + Transpose addNode for
// _vector_field_main_blocks_3_attn_Transpose_1.
.id=0, .name= "_vector_field_main_blocks_3_attn_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_main_blocks_3_attn_Transpose_1", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__vector_field_main_blocks_3_attn_Transpose_1, // Node Params
                       1, // Num Node Params
                       inputs__vector_field_main_blocks_3_attn_Transpose_1, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_main_blocks_3_attn_Transpose_1, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers static 1-element uint8 constant tensor (data from model binary) used as the
// comparison operand of the Equal_1 node below.
static ModelError_t addTensor__vector_field_main_blocks_3_attn_Constant_44_output_0(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions__vector_field_main_blocks_3_attn_Constant_44_output_0[] = {1};
  VALIDATE(model.addTensor("_vector_field_main_blocks_3_attn_Constant_44_output_0", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Constant_44_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Constant_44_output_0, .memType= QNN_TENSORMEMTYPE_RAW,
                           {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_3_attn_Constant_44_output_0), .dataSize=BINLEN(_vector_field_main_blocks_3_attn_Constant_44_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// ElementWiseBinary node with scalar param "operation" = 3; the node name says Equal, and the
// output is BOOL_8 with undefined quantization (assumption: 3 selects Equal — confirm in QnnOpDef.h).
// Output shape [1, 1, 192, 1].
static ModelError_t addNode__vector_field_main_blocks_3_attn_Equal_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Equal_1 */
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Equal_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Equal_1[] = {
    "_vector_field_main_blocks_3_attn_Unsqueeze_15_output_0",
    "_vector_field_main_blocks_3_attn_Constant_44_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Cast_output_0[] = {1, 1, 192, 1};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Equal_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Cast_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_BOOL_8, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Cast_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Equal_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_3_attn_Equal_1, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Equal_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Equal_1, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Split node for main_blocks.5 attention: tensor param split_index = {128}, scalar axis = 2;
// splits the [1, 50, 256] W_value output into two [1, 50, 128] halves.
// (Definition continues in the next chunk.)
static ModelError_t addNode__vector_field_main_blocks_5_attention_Split_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Split_2 */
  uint32_t dimensions__vector_field_main_blocks_5_attention_Split_2_split_index[] = {1};
  uint32_t _vector_field_main_blocks_5_attention_Split_2_split_index[] = {128};
  Qnn_Param_t params__vector_field_main_blocks_5_attention_Split_2[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="split_index",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_5_attention_Split_2_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
  };
  const char* inputs__vector_field_main_blocks_5_attention_Split_2[] = {
    "_vector_field_main_blocks_5_attention_W_value_linear_Add_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_5_attention_Split_2_output_0[] = {1, 50, 128};
  uint32_t
// (continued from previous chunk) second output dims, the two output tensors (both quantized
// identically), and the Split addNode call for _vector_field_main_blocks_5_attention_Split_2.
dimensions__vector_field_main_blocks_5_attention_Split_2_output_1[] = {1, 50, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Split_2[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032239241991192f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032239241991192f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_main_blocks_5_attention_Split_2", // Node Name
                       "qti.aisw", // Package Name
                       "Split", // Qnn Node Type
                       params__vector_field_main_blocks_5_attention_Split_2, // Node Params
                       2, // Num Node Params
                       inputs__vector_field_main_blocks_5_attention_Split_2, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_main_blocks_5_attention_Split_2, // Output Tensors
                       2// Num Output Tensors
                       ), err);
return err;
}

// Registers static 256x256 uint8 weight tensor "onnx__MatMul_3147" (data from model binary).
static ModelError_t addTensor_onnx__MatMul_3147(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3147[] = {256, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3147", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3147", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074213664047420f, .offset= -140}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3147, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3147), .dataSize=BINLEN(onnx__MatMul_3147)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// Registers static 256-element uint8 bias tensor for main_blocks.9 attn W_key linear (data from model binary).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0070638749748468f, .offset= -116}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
                           .data=BINVARSTART(tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// FullyConnected node for main_blocks.9 attn W_key: inputs {activation, weight, bias} -> [128, 256] output.
// (Definition continues in the next chunk.)
static ModelError_t addNode__vector_field_main_blocks_9_attn_W_key_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_key_linear_MatMul */
  const char* inputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul[] = {
    "_vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape",
    "onnx__MatMul_3147",
    "tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_W_key_linear_Add_output_0_fc[] = {128, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_key_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_key_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_W_key_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
// (continued from previous chunk) remaining addNode arguments for
// _vector_field_main_blocks_9_attn_W_key_linear_MatMul.
inputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node restoring the batch dim after the FullyConnected above: [128, 256] -> [1, 128, 256].
static ModelError_t addNode__vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_9_attn_W_key_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_W_key_linear_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_key_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_key_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Registers static 256x256 uint8 weight tensor "onnx__MatMul_3148" (data from model binary).
static ModelError_t addTensor_onnx__MatMul_3148(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3148[] = {256, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3148", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3148", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038607011083513f, .offset= -111}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3148, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3148), .dataSize=BINLEN(onnx__MatMul_3148)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// Registers static 256-element uint8 bias tensor for main_blocks.9 attn W_value linear (data from model binary).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014766673557460f, .offset= -139}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
                           .data=BINVARSTART(tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

// FullyConnected node for main_blocks.9 attn W_value: inputs {activation, weight, bias} -> [128, 256] output.
// (Definition continues in the next chunk.)
static ModelError_t addNode__vector_field_main_blocks_9_attn_W_value_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_value_linear_MatMul */
  const char* inputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul[] = {
    "_vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape",
    "onnx__MatMul_3148",
    "tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_W_value_linear_Add_output_0_fc[] = {128, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_W_value_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
// (continued from previous chunk) remaining addNode arguments for
// _vector_field_main_blocks_9_attn_W_value_linear_MatMul.
inputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node restoring the batch dim after the FullyConnected above: [128, 256] -> [1, 128, 256].
static ModelError_t addNode__vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_9_attn_W_value_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_W_value_linear_Add_output_0[] = {1, 128, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Split node for main_blocks.11 attention: tensor param split_index = {128}, scalar axis = 2;
// splits the W_value output into two [1, 50, 128] halves.
static ModelError_t addNode__vector_field_main_blocks_11_attention_Split_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Split_2 */
  uint32_t dimensions__vector_field_main_blocks_11_attention_Split_2_split_index[] = {1};
  uint32_t _vector_field_main_blocks_11_attention_Split_2_split_index[] = {128};
  Qnn_Param_t params__vector_field_main_blocks_11_attention_Split_2[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="split_index",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_11_attention_Split_2_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
  };
  const char* inputs__vector_field_main_blocks_11_attention_Split_2[] = {
    "_vector_field_main_blocks_11_attention_W_value_linear_Add_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_11_attention_Split_2_output_0[] = {1, 50, 128};
  uint32_t dimensions__vector_field_main_blocks_11_attention_Split_2_output_1[] = {1, 50, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Split_2[] = {
    (Qnn_Tensor_t) { .version=
    QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033338123466820f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033338123466820f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_11_attention_Split_2", // Node Name
                         "qti.aisw", // Package Name
                         "Split", // Qnn Node Type
                         params__vector_field_main_blocks_11_attention_Split_2, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_11_attention_Split_2, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_11_attention_Split_2, // Output Tensors
                         2// Num Output Tensors
                         ), err);
  return err;
}

// (Truncated here; definition continues in the next chunk.)
static ModelError_t addTensor_onnx__MatMul_3192(QnnModel&
model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3192[] = {256, 256}; VALIDATE(model.addTensor("onnx__MatMul_3192", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3192", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120942573994398f, .offset= -102}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3192, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3192), .dataSize=BINLEN(onnx__MatMul_3192)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0125301266089082f, .offset= -80}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_W_key_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_key_linear_MatMul */ const char* inputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul[] = { "_vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape", "onnx__MatMul_3192", "tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_15_attn_W_key_linear_Add_output_0_fc[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_key_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_key_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_W_key_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_15_attn_W_key_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_15_attn_W_key_linear_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_key_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_key_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3193(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3193[] = {256, 256}; 
VALIDATE(model.addTensor("onnx__MatMul_3193", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3193", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030794236809015f, .offset= -119}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3193, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3193), .dataSize=BINLEN(onnx__MatMul_3193)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003988779790234f, .offset= -200}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), 
err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_W_value_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_value_linear_MatMul */ const char* inputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul[] = { "_vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3193", "tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_15_attn_W_value_linear_Add_output_0_fc[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_W_value_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_15_attn_W_value_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_15_attn_W_value_linear_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Split_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_17_attention_Split_2 */ uint32_t dimensions__vector_field_main_blocks_17_attention_Split_2_split_index[] = {1}; uint32_t _vector_field_main_blocks_17_attention_Split_2_split_index[] = {128}; Qnn_Param_t params__vector_field_main_blocks_17_attention_Split_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_17_attention_Split_2_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Split_2[] = { "_vector_field_main_blocks_17_attention_W_value_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Split_2_output_0[] = {1, 50, 128}; uint32_t dimensions__vector_field_main_blocks_17_attention_Split_2_output_1[] = {1, 50, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Split_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039237230084836f, .offset= -137}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039237230084836f, .offset= -137}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Split_2", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_17_attention_Split_2, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_17_attention_Split_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Split_2, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3237(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3237[] = {256, 256}; VALIDATE(model.addTensor("onnx__MatMul_3237", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3237", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0092592900618911f, .offset= -134}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3237, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3237), .dataSize=BINLEN(onnx__MatMul_3237)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0082191005349159f, .offset= -125}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_W_key_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_21_attn_W_key_linear_MatMul */ const char* inputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul[] = { "_vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape", "onnx__MatMul_3237", "tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_21_attn_W_key_linear_Add_output_0_fc[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_key_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_key_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_W_key_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape */ const char* 
inputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_21_attn_W_key_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_21_attn_W_key_linear_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_key_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_key_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3238(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3238[] = {256, 256}; VALIDATE(model.addTensor("onnx__MatMul_3238", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3238", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030478551052511f, .offset= -136}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3238, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3238), .dataSize=BINLEN(onnx__MatMul_3238)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001638142421143f, .offset= -112}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_W_value_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_value_linear_MatMul */ const 
char* inputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul[] = { "_vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape", "onnx__MatMul_3238", "tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_21_attn_W_value_linear_Add_output_0_fc[] = {128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_value_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_value_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_W_value_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape[] = { 
"_vector_field_main_blocks_21_attn_W_value_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_21_attn_W_value_linear_Add_output_0[] = {1, 128, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_value_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_value_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_attention_Split_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Split_2 */ uint32_t dimensions__vector_field_main_blocks_23_attention_Split_2_split_index[] = {1}; uint32_t _vector_field_main_blocks_23_attention_Split_2_split_index[] = {128}; Qnn_Param_t params__vector_field_main_blocks_23_attention_Split_2[] = { 
// NOTE(review): machine-generated by qnn-onnx-converter (see file header); normally
// regenerated, not hand-edited. Comments below were added for readability only.
//
// Tail of addNode__vector_field_main_blocks_23_attention_Split_2 (the function opens on an
// earlier line): the Split node's "split_index" tensor parameter (static UINT_32, a single
// 4-byte element => the input is split into two pieces) and its "axis" scalar parameter,
// followed by the node's input name, the two {1, 50, 128} output shapes, the two quantized
// output tensor descriptors, and the addNode registration.
{.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_23_attention_Split_2_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// Scalar parameter: split along axis 2 (the last axis of the rank-3 input -- TODO confirm).
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__vector_field_main_blocks_23_attention_Split_2[] = { "_vector_field_main_blocks_23_attention_W_value_linear_Add_output_0" };
uint32_t dimensions__vector_field_main_blocks_23_attention_Split_2_output_0[] = {1, 50, 128};
uint32_t dimensions__vector_field_main_blocks_23_attention_Split_2_output_1[] = {1, 50, 128};
// Both outputs are asymmetric uint8 with the same scale/offset encoding (shared with the producer's range).
Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Split_2[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040565114468336f, .offset= -100}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040565114468336f, .offset= -100}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_23_attention_Split_2", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_23_attention_Split_2, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_23_attention_Split_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_23_attention_Split_2, // Output Tensors
2// Num Output Tensors
), err);
return err;
}
// Registers the ONNX Unsqueeze lowered as a QNN "Reshape" node: reshapes the scalar-like
// "_Div_output_0" (the normalized time step -- presumably current_step/total_step; verify
// against caller) into a {1, 1} tensor for the sinusoidal time encoder.
static ModelError_t addNode__vector_field_time_encoder_sinusoidal_Reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_time_encoder_sinusoidal_Reshape */
const char* inputs__vector_field_time_encoder_sinusoidal_Reshape[] = { "_Div_output_0" };
uint32_t dimensions__vector_field_time_encoder_sinusoidal_Unsqueeze_output_0[] = {1, 1};
Qnn_Tensor_t outputs__vector_field_time_encoder_sinusoidal_Reshape[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_time_encoder_sinusoidal_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_time_encoder_sinusoidal_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_time_encoder_sinusoidal_Reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_time_encoder_sinusoidal_Reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_time_encoder_sinusoidal_Reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers the 4-way Split of block 3's attention key projection
// ("..._attn_W_key_linear_Add_output_0", presumably one 256-wide tensor split into four
// 64-wide heads -- verify against producer). split_index {64, 128, 192} gives four equal
// {1, 128, 64} outputs along axis 2; all four share the producer's uint8 scale/offset.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Split_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Split_1 */
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_1_split_index[] = {3};
uint32_t _vector_field_main_blocks_3_attn_Split_1_split_index[] = {64, 128, 192};
Qnn_Param_t params__vector_field_main_blocks_3_attn_Split_1[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_1_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_1_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Split_1_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Split_1[] = { "_vector_field_main_blocks_3_attn_W_key_linear_Add_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_1_output_0[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_1_output_1[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_1_output_2[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_1_output_3[] = {1, 128, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Split_1[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_1_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_1_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_1_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_1_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_1_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_1_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Split_1", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Split_1, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Split_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Split_1, // Output Tensors
4// Num Output Tensors
), err);
return err;
}
// Registers the 4-way Split of block 3's attention value projection; structurally identical
// to the key split above (split_index {64, 128, 192}, axis 2, four {1, 128, 64} outputs)
// but with the value branch's own quantization encoding.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Split_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Split_2 */
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_2_split_index[] = {3};
uint32_t _vector_field_main_blocks_3_attn_Split_2_split_index[] = {64, 128, 192};
Qnn_Param_t params__vector_field_main_blocks_3_attn_Split_2[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Split_2_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Split_2[] = { "_vector_field_main_blocks_3_attn_W_value_linear_Add_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_2_output_0[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_2_output_1[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_2_output_2[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_2_output_3[] = {1, 128, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Split_2[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_2_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_2_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_2_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_2_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Split_2", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Split_2, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Split_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Split_2, // Output Tensors
4// Num Output Tensors
), err);
return err;
}
// Registers ONNX Equal lowered as ElementWiseBinary with scalar "operation" = 3
// (presumably the EQUAL operation selector -- see QnnOpDef.h); produces a BOOL_8 mask.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Equal(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Equal */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Equal[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char*
inputs__vector_field_main_blocks_3_attn_Equal[] = { "_vector_field_main_blocks_3_attn_Transpose_1_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0" };
// NOTE(review): the output tensor here is named after block 21 ("..._blocks_21_attn_Cast_2_output_0")
// even though the node belongs to block 3 -- this looks like converter tensor de-duplication
// (identical masks shared across blocks); verify before renaming anything.
uint32_t dimensions__vector_field_main_blocks_21_attn_Cast_2_output_0[] = {1, 1, 1, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Equal[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Cast_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_BOOL_8, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Cast_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Equal", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Equal, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Equal, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Equal, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers ONNX Unsqueeze lowered as Reshape: {1, 50, 128} -> {1, 1, 50, 128} on the
// first half of block 5's cross-attention value split (quantization passes through).
static ModelError_t addNode__vector_field_main_blocks_5_attention_Unsqueeze_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_5_attention_Unsqueeze_4 */
const char* inputs__vector_field_main_blocks_5_attention_Unsqueeze_4[] = { "_vector_field_main_blocks_5_attention_Split_2_output_0" };
uint32_t dimensions__vector_field_main_blocks_5_attention_Unsqueeze_4_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Unsqueeze_4[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Unsqueeze_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032239241991192f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Unsqueeze_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_5_attention_Unsqueeze_4", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_5_attention_Unsqueeze_4, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_5_attention_Unsqueeze_4, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Same Unsqueeze-as-Reshape for the second half of block 5's value split
// (Split_2_output_1); shares the same scale/offset as Unsqueeze_4.
static ModelError_t addNode__vector_field_main_blocks_5_attention_Unsqueeze_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_5_attention_Unsqueeze_5 */
const char* inputs__vector_field_main_blocks_5_attention_Unsqueeze_5[] = { "_vector_field_main_blocks_5_attention_Split_2_output_1" };
uint32_t dimensions__vector_field_main_blocks_5_attention_Unsqueeze_5_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Unsqueeze_5[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Unsqueeze_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032239241991192f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Unsqueeze_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_5_attention_Unsqueeze_5", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_5_attention_Unsqueeze_5, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_5_attention_Unsqueeze_5, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers the 4-way Split of block 9's attention key projection; same shape/axis layout
// as the block 3 key split, with block 9's own quantization encoding.
static ModelError_t addNode__vector_field_main_blocks_9_attn_Split_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Split_1 */
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_1_split_index[] = {3};
uint32_t _vector_field_main_blocks_9_attn_Split_1_split_index[] = {64, 128, 192};
Qnn_Param_t params__vector_field_main_blocks_9_attn_Split_1[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_1_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_1_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf=
{ .data=(uint8_t*)_vector_field_main_blocks_9_attn_Split_1_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__vector_field_main_blocks_9_attn_Split_1[] = { "_vector_field_main_blocks_9_attn_W_key_linear_Add_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_1_output_0[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_1_output_1[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_1_output_2[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_1_output_3[] = {1, 128, 64};
// Four {1, 128, 64} outputs, all sharing the key projection's uint8 scale/offset.
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Split_1[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_1_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_1_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_1_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_1_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_1_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_1_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Split_1", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Split_1, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Split_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Split_1, // Output Tensors
4// Num Output Tensors
), err);
return err;
}
// Registers the 4-way Split of block 9's attention value projection (same layout as the
// key split above, value branch's own encoding).
static ModelError_t addNode__vector_field_main_blocks_9_attn_Split_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Split_2 */
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_2_split_index[] = {3};
uint32_t _vector_field_main_blocks_9_attn_Split_2_split_index[] = {64, 128, 192};
Qnn_Param_t params__vector_field_main_blocks_9_attn_Split_2[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Split_2_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__vector_field_main_blocks_9_attn_Split_2[] = { "_vector_field_main_blocks_9_attn_W_value_linear_Add_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_2_output_0[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_2_output_1[] = {1, 128, 64};
uint32_t
dimensions__vector_field_main_blocks_9_attn_Split_2_output_2[] = {1, 128, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_2_output_3[] = {1, 128, 64};
// Four {1, 128, 64} outputs sharing the value projection's uint8 scale/offset.
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Split_2[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_2_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_2_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_2_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_2_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Split_2", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Split_2, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Split_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Split_2, // Output Tensors
4// Num Output Tensors
), err);
return err;
}
// Registers ONNX Unsqueeze lowered as Reshape: {1, 50, 128} -> {1, 1, 50, 128} on the
// first half of block 11's cross-attention value split.
static ModelError_t addNode__vector_field_main_blocks_11_attention_Unsqueeze_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_11_attention_Unsqueeze_4 */
const char* inputs__vector_field_main_blocks_11_attention_Unsqueeze_4[] = { "_vector_field_main_blocks_11_attention_Split_2_output_0" };
uint32_t
dimensions__vector_field_main_blocks_11_attention_Unsqueeze_4_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Unsqueeze_4[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Unsqueeze_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033338123466820f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Unsqueeze_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_11_attention_Unsqueeze_4", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_11_attention_Unsqueeze_4, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_11_attention_Unsqueeze_4, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Same Unsqueeze-as-Reshape for the second half of block 11's value split
// (Split_2_output_1); shares the same scale/offset as Unsqueeze_4.
static ModelError_t addNode__vector_field_main_blocks_11_attention_Unsqueeze_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_11_attention_Unsqueeze_5 */
const char* inputs__vector_field_main_blocks_11_attention_Unsqueeze_5[] = { "_vector_field_main_blocks_11_attention_Split_2_output_1" };
uint32_t dimensions__vector_field_main_blocks_11_attention_Unsqueeze_5_output_0[] = {1, 1, 50, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Unsqueeze_5[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Unsqueeze_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033338123466820f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Unsqueeze_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_11_attention_Unsqueeze_5", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_11_attention_Unsqueeze_5, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_11_attention_Unsqueeze_5, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers the 4-way Split of block 15's attention key projection (same layout as the
// other key splits; block 15's own quantization encoding). The function continues past
// this chunk of the file.
static ModelError_t addNode__vector_field_main_blocks_15_attn_Split_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_Split_1 */
uint32_t dimensions__vector_field_main_blocks_15_attn_Split_1_split_index[] = {3};
uint32_t _vector_field_main_blocks_15_attn_Split_1_split_index[] = {64, 128, 192};
Qnn_Param_t params__vector_field_main_blocks_15_attn_Split_1[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_1_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f,
.offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_1_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Split_1_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Split_1[] = { "_vector_field_main_blocks_15_attn_W_key_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_1_output_0[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_1_output_1[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_1_output_2[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_1_output_3[] = {1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Split_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_1_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_1_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_1_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_1_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_1_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_1_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Split_1", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_15_attn_Split_1, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_15_attn_Split_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Split_1, // Output Tensors 4// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Split_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Split_2 */ uint32_t dimensions__vector_field_main_blocks_15_attn_Split_2_split_index[] = {3}; uint32_t _vector_field_main_blocks_15_attn_Split_2_split_index[] = {64, 128, 192}; Qnn_Param_t params__vector_field_main_blocks_15_attn_Split_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_2_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_2_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Split_2_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Split_2[] = { 
/* Split_2 applies the same 4-way split (axis 2, cut points {64, 128, 192}) to the value projection named on the next line. Generated code — regenerate via the converter rather than hand-editing. */
"_vector_field_main_blocks_15_attn_W_value_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_2_output_0[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_2_output_1[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_2_output_2[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_2_output_3[] = {1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Split_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
/* Split_2 outputs 0-3: {1, 128, 64} UFIXED_POINT_8, shared encoding scale 0.1088553145527840 / offset -129. */
"_vector_field_main_blocks_15_attn_Split_2_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_2_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_2_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_2_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Split_2", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_15_attn_Split_2, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_15_attn_Split_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Split_2, // Output Tensors 4// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Unsqueeze_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE 
FOR _vector_field_main_blocks_17_attention_Unsqueeze_4 */ const char* inputs__vector_field_main_blocks_17_attention_Unsqueeze_4[] = { "_vector_field_main_blocks_17_attention_Split_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Unsqueeze_4_output_0[] = {1, 1, 50, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Unsqueeze_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Unsqueeze_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039237230084836f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Unsqueeze_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Unsqueeze_4", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_Unsqueeze_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Unsqueeze_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Unsqueeze_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Unsqueeze_5 */ const char* inputs__vector_field_main_blocks_17_attention_Unsqueeze_5[] = { "_vector_field_main_blocks_17_attention_Split_2_output_1" }; uint32_t 
/* ONNX Unsqueeze nodes are lowered to parameter-less QNN Reshape ops producing rank-4 {1, 1, 50, 128} tensors; both outputs carry scale 0.0039237230084836 / offset -137 — presumably the input Split_2 outputs' encodings carried through (confirm against the Split_2 definition elsewhere in the file). */
dimensions__vector_field_main_blocks_17_attention_Unsqueeze_5_output_0[] = {1, 1, 50, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Unsqueeze_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Unsqueeze_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039237230084836f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Unsqueeze_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Unsqueeze_5", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_Unsqueeze_5, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Unsqueeze_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_Split_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Split_1 */ uint32_t dimensions__vector_field_main_blocks_21_attn_Split_1_split_index[] = {3}; uint32_t _vector_field_main_blocks_21_attn_Split_1_split_index[] = {64, 128, 192}; Qnn_Param_t params__vector_field_main_blocks_21_attn_Split_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_1_split_index", 
/* blocks_21 Split_1: same generated Split pattern as blocks_15 — static uint32 split_index tensor {64, 128, 192}, axis 2, four {1, 128, 64} outputs over the key projection. */
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_1_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Split_1_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_21_attn_Split_1[] = { "_vector_field_main_blocks_21_attn_W_key_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_1_output_0[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_1_output_1[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_1_output_2[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_1_output_3[] = {1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Split_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
/* Outputs 0-3 below share encoding scale 0.2734210491180420 / offset -124. */
.numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_1_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_1_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_1_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_1_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_1_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_1_output_3, .memType= QNN_TENSORMEMTYPE_RAW, 
/* blocks_21 Split_2: "axis" scalar param (uint32 value 2) selects the split dimension; input is the value projection, outputs are four {1, 128, 64} tensors with shared encoding 0.0837441459298134 / -125. */
.name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_21_attn_Split_2[] = { "_vector_field_main_blocks_21_attn_W_value_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_2_output_0[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_2_output_1[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_2_output_2[] = {1, 128, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_2_output_3[] = {1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Split_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_2_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_2_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
/* Remaining Split_2 outputs (2, 3) and the addNode call follow; then blocks_23 Unsqueeze_4, another Unsqueeze-as-Reshape to {1, 1, 50, 128}. */
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_2_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_2_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_2_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_2_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Split_2", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_21_attn_Split_2, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_21_attn_Split_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Split_2, // Output Tensors 4// Num 
Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_attention_Unsqueeze_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Unsqueeze_4 */ const char* inputs__vector_field_main_blocks_23_attention_Unsqueeze_4[] = { "_vector_field_main_blocks_23_attention_Split_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_23_attention_Unsqueeze_4_output_0[] = {1, 1, 50, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Unsqueeze_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Unsqueeze_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040565114468336f, .offset= -100}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Unsqueeze_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_attention_Unsqueeze_4", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_23_attention_Unsqueeze_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_Unsqueeze_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_attention_Unsqueeze_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Unsqueeze_5 */ const char* 
/* blocks_23 Unsqueeze_5: parameter-less Reshape of Split_2 output 1 to {1, 1, 50, 128}, encoding 0.0040565114468336 / -100 (matches sibling Unsqueeze_4). */
inputs__vector_field_main_blocks_23_attention_Unsqueeze_5[] = { "_vector_field_main_blocks_23_attention_Split_2_output_1" }; uint32_t dimensions__vector_field_main_blocks_23_attention_Unsqueeze_5_output_0[] = {1, 1, 50, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Unsqueeze_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Unsqueeze_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040565114468336f, .offset= -100}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Unsqueeze_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_attention_Unsqueeze_5", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_23_attention_Unsqueeze_5, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_Unsqueeze_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_0_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_dwconv_Pad[] = { 
/* dwconv Pad: pad_amount is a {3, 2} tensor {0,0, 2,2, 0,0} — pads only the middle (sequence) dimension by 2 on each side; "scheme" scalar is uint32 3 (NOTE(review): meaning of scheme 3 is defined in QnnOpDef.h — verify against the Pad op definition rather than assuming a mode). */
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_0_convnext_0_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
inputs__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_4 */ const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_4[] = { "_vector_field_main_blocks_3_attn_Split_1_output_0" }; uint32_t 
dimensions__vector_field_main_blocks_3_attn_Unsqueeze_4_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_4[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_4_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_4_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_4", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_4, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_4, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_3 attention: Unsqueeze of Split_1_output_1, emitted as a Reshape to {1,1,128,64}.
// Same output quantization as Unsqueeze_4 (scale/offset propagated from the Split producer).
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_5(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_5 */
  const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_5[] = {
    "_vector_field_main_blocks_3_attn_Split_1_output_1"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_5_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_5[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_5_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_5_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_5", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_5, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_5, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_3 attention: Unsqueeze of Split_1_output_2 → Reshape to {1,1,128,64}
// (body continues on the next source line).
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_6(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_6 */
  const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_6[] = {
    "_vector_field_main_blocks_3_attn_Split_1_output_2"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_6_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_6[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_6_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_6_output_0,
            .memType=
QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_6", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_6, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_6, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_3 attention: Unsqueeze of Split_1_output_3 → Reshape to {1,1,128,64}.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_7(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_7 */
  const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_7[] = {
    "_vector_field_main_blocks_3_attn_Split_1_output_3"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_7_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_7[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_7_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_7_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_7", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_7, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_7, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_3 attention: Unsqueeze of Split_2_output_0 → Reshape to {1,1,128,64}. Note the
// different quantization (scale 0.2249…, offset -125) — this branch comes from Split_2.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_8(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_8 */
  const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_8[] = {
    "_vector_field_main_blocks_3_attn_Split_2_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_8_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_8[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_8_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_8_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_8", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_8, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_8, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_3 attention: Unsqueeze of Split_2_output_1 → Reshape to {1,1,128,64}.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_9(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_9 */
  const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_9[] = {
    "_vector_field_main_blocks_3_attn_Split_2_output_1"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_9_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_9[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_9_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_9_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_9", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_9, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_9, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_3 attention: Unsqueeze of Split_2_output_2 (body continues on the next source line).
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_10 */
  const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_10[] = {
"_vector_field_main_blocks_3_attn_Split_2_output_2"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_10_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_10[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_10_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_10_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_10", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_10, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_10, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_3 attention: Unsqueeze of Split_2_output_3 → Reshape to {1,1,128,64}.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_11(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_11 */
  const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_11[] = {
    "_vector_field_main_blocks_3_attn_Split_2_output_3"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_11_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_11[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Unsqueeze_11_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_11_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Unsqueeze_11", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Unsqueeze_11, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Unsqueeze_11, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_5 attention: Concat along axis 0 stacking two Unsqueeze outputs into {2,1,50,128}
// (body continues on the next source line).
static ModelError_t addNode__vector_field_main_blocks_5_attention_Concat_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Concat_2 */
  Qnn_Param_t params__vector_field_main_blocks_5_attention_Concat_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_5_attention_Concat_2[] = {
    "_vector_field_main_blocks_5_attention_Unsqueeze_4_output_0",
    "_vector_field_main_blocks_5_attention_Unsqueeze_5_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_5_attention_Concat_2_output_0[] = {2, 1, 50, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Concat_2[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name=
"_vector_field_main_blocks_5_attention_Concat_2_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0032239241991192f, .offset= -132}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_5_attention_Concat_2_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_5_attention_Concat_2", // Node Name
                         "qti.aisw", // Package Name
                         "Concat", // Qnn Node Type
                         params__vector_field_main_blocks_5_attention_Concat_2, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_5_attention_Concat_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_5_attention_Concat_2, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_1_output_0 → Reshape to {1,1,128,64}
// (output quant scale 0.2041…, offset -129 — shared by Unsqueeze_4..7 of this block).
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_4 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_4[] = {
    "_vector_field_main_blocks_9_attn_Split_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_4_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_4[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_4_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_4_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_4", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_4, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_4, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_1_output_1 (body continues on the next source line).
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_5(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_5 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_5[] = {
    "_vector_field_main_blocks_9_attn_Split_1_output_1"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_5_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_5[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_5_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_5_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_5", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_5, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_5, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_1_output_2 → Reshape to {1,1,128,64}.
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_6(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_6 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_6[] = {
    "_vector_field_main_blocks_9_attn_Split_1_output_2"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_6_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_6[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_6_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_6_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_6", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_6, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_6, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_1_output_3 → Reshape to {1,1,128,64}.
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_7(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_7 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_7[] = {
    "_vector_field_main_blocks_9_attn_Split_1_output_3"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_7_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_7[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_7_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_7_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_7", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_7, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_7, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_2_output_0 → Reshape to {1,1,128,64}
// (different quant: scale 0.0968…, offset -138 — the Split_2 branch of this block).
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_8(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_8 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_8[] = {
    "_vector_field_main_blocks_9_attn_Split_2_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_8_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_8[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_8_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_8_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_8", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_8, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_8, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_2_output_1 (body continues on the next source line).
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_9(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_9 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_9[] = {
    "_vector_field_main_blocks_9_attn_Split_2_output_1"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_9_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t
outputs__vector_field_main_blocks_9_attn_Unsqueeze_9[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_9_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_9_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_9", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_9, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_9, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_2_output_2 → Reshape to {1,1,128,64}.
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_10 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_10[] = {
    "_vector_field_main_blocks_9_attn_Split_2_output_2"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_10_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_10[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_10_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_10_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_10", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_10, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_10, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_9 attention: Unsqueeze of Split_2_output_3 (body continues on the next source line).
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_11(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_11 */
  const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_11[] = {
    "_vector_field_main_blocks_9_attn_Split_2_output_3"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_11_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_11[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Unsqueeze_11_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_11_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions=
nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Unsqueeze_11", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Unsqueeze_11, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Unsqueeze_11, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_11 attention: Concat along axis 0 stacking two Unsqueeze outputs into {2,1,50,128}.
static ModelError_t addNode__vector_field_main_blocks_11_attention_Concat_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Concat_2 */
  Qnn_Param_t params__vector_field_main_blocks_11_attention_Concat_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_11_attention_Concat_2[] = {
    "_vector_field_main_blocks_11_attention_Unsqueeze_4_output_0",
    "_vector_field_main_blocks_11_attention_Unsqueeze_5_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_11_attention_Concat_2_output_0[] = {2, 1, 50, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Concat_2[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_11_attention_Concat_2_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0033338123466820f, .offset= -129}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_11_attention_Concat_2_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_11_attention_Concat_2", // Node Name
                         "qti.aisw", // Package Name
                         "Concat", // Qnn Node Type
                         params__vector_field_main_blocks_11_attention_Concat_2, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_11_attention_Concat_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_11_attention_Concat_2, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_15 attention: Unsqueeze of Split_1_output_0 → Reshape to {1,1,128,64}
// (output quant scale 0.3400…, offset -122 — shared by Unsqueeze_4..7 of this block).
static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_4 */
  const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_4[] = {
    "_vector_field_main_blocks_15_attn_Split_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_4_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_4[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_15_attn_Unsqueeze_4_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_4_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_15_attn_Unsqueeze_4", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_15_attn_Unsqueeze_4, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_15_attn_Unsqueeze_4, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// blocks_15 attention: Unsqueeze of Split_1_output_1 → Reshape to {1,1,128,64}.
static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_5(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_5 */
  const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_5[] = {
    "_vector_field_main_blocks_15_attn_Split_1_output_1"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_5_output_0[] = {1, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_5[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_15_attn_Unsqueeze_5_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_5_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_15_attn_Unsqueeze_5", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_15_attn_Unsqueeze_5, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_15_attn_Unsqueeze_5, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

static ModelError_t
addNode__vector_field_main_blocks_15_attn_Unsqueeze_6(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_6 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_6[] = { "_vector_field_main_blocks_15_attn_Split_1_output_2" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_6_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_6_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_6_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_6", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_6, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_6, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_7(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_7 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_7[] = { "_vector_field_main_blocks_15_attn_Split_1_output_3" }; uint32_t 
dimensions__vector_field_main_blocks_15_attn_Unsqueeze_7_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_7[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_7", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_7, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_7, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_8 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_8[] = { "_vector_field_main_blocks_15_attn_Split_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_8_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_8_output_0", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_8_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_8", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_8, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_8, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_15_attn_Unsqueeze_9": Reshape of Split_2_output_1 to {1, 1, 128, 64}; same quantization as the other Split_2 reshapes (scale 0.10885..., offset -129). */ static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_9(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_9 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_9[] = { "_vector_field_main_blocks_15_attn_Split_2_output_1" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_9_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_9[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_9_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 4,
.dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_9_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_9", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_9, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_9, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_15_attn_Unsqueeze_10": Reshape of Split_2_output_2 to {1, 1, 128, 64}. */ static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_10(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_10 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_10[] = { "_vector_field_main_blocks_15_attn_Split_2_output_2" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_10_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced=
0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_10", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_10, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_10, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_15_attn_Unsqueeze_11": last of the Split_2 reshapes (output_3), completing the eight Unsqueeze_4..11 reshapes for this attention block. */ static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_11(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_11 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_11[] = { "_vector_field_main_blocks_15_attn_Split_2_output_3" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_11_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_11[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_11_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_11_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_11", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_11, // Input
Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_11, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_17_attention_Concat_2": QNN Concat with scalar param axis=0 (uint32), stacking the block-17 Unsqueeze_4/5 outputs into a {2, 1, 50, 128} uint8 tensor (scale 0.00392..., offset -137). */ static ModelError_t addNode__vector_field_main_blocks_17_attention_Concat_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Concat_2 */ Qnn_Param_t params__vector_field_main_blocks_17_attention_Concat_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Concat_2[] = { "_vector_field_main_blocks_17_attention_Unsqueeze_4_output_0", "_vector_field_main_blocks_17_attention_Unsqueeze_5_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Concat_2_output_0[] = {2, 1, 50, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Concat_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Concat_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039237230084836f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Concat_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Concat_2", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_17_attention_Concat_2, // Node Params 1, // Num Node Params
inputs__vector_field_main_blocks_17_attention_Concat_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Concat_2, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Block-21 attention: same Unsqueeze->Reshape pattern as block 15. "_vector_field_main_blocks_21_attn_Unsqueeze_4": Reshape of Split_1_output_0 to {1, 1, 128, 64}, uint8 scale 0.27342..., offset -124. */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_4 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_4[] = { "_vector_field_main_blocks_21_attn_Split_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_4_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_4", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_4, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_21_attn_Unsqueeze_5": Reshape of Split_1_output_1 to {1, 1, 128, 64}. */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_5 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_5[] = { "_vector_field_main_blocks_21_attn_Split_1_output_1" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_5_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_5", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_5, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_5, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_21_attn_Unsqueeze_6": Reshape of Split_1_output_2 to {1, 1, 128, 64}. */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_6(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_6 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_6[] = { "_vector_field_main_blocks_21_attn_Split_1_output_2" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_6_output_0[] = {1, 1, 128,
64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_6_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_6_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_6", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_6, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_6, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_21_attn_Unsqueeze_7": Reshape of Split_1_output_3 to {1, 1, 128, 64}, completing the Split_1 group for block 21. */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_7(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_7 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_7[] = { "_vector_field_main_blocks_21_attn_Split_1_output_3" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_7_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_7[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_7", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_7, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_7, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_21_attn_Unsqueeze_8": first Split_2 reshape for block 21; Split_2 group uses scale 0.08374..., offset -125. */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_8 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_8[] = { "_vector_field_main_blocks_21_attn_Split_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_8_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_8_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_8_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_8", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_8, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_8, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_21_attn_Unsqueeze_9": Reshape of Split_2_output_1 to {1, 1, 128, 64}. */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_9(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_9 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_9[] = { "_vector_field_main_blocks_21_attn_Split_2_output_1" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_9_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_9[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_9_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_9_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_9", // Node Name "qti.aisw", // Package
Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_9, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_9, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_21_attn_Unsqueeze_10": Reshape of Split_2_output_2 to {1, 1, 128, 64}. */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_10(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_10 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_10[] = { "_vector_field_main_blocks_21_attn_Split_2_output_2" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_10_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_10", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_10, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_10, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_21_attn_Unsqueeze_11": last Split_2 reshape (output_3) for block 21. */ static
ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_11(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_11 */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_11[] = { "_vector_field_main_blocks_21_attn_Split_2_output_3" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_11_output_0[] = {1, 1, 128, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_11[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_11_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_11_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze_11", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze_11, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze_11, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_23_attention_Concat_2": QNN Concat (axis=0) of the block-23 Unsqueeze_4/5 outputs into a {2, 1, 50, 128} uint8 tensor. */ static ModelError_t addNode__vector_field_main_blocks_23_attention_Concat_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Concat_2 */ Qnn_Param_t params__vector_field_main_blocks_23_attention_Concat_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_23_attention_Concat_2[] = { "_vector_field_main_blocks_23_attention_Unsqueeze_4_output_0", "_vector_field_main_blocks_23_attention_Unsqueeze_5_output_0" }; uint32_t dimensions__vector_field_main_blocks_23_attention_Concat_2_output_0[] = {2, 1, 50, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Concat_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Concat_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040565114468336f, .offset= -100}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Concat_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_attention_Concat_2", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_23_attention_Concat_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_23_attention_Concat_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_Concat_2, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Registers the STATIC (weight/constant) tensor "_vector_field_time_encoder_sinusoidal_Constant_2_output_0": a single-element uint8 tensor (scale 3.92156..., offset 0) whose raw data is embedded in the model binary and located via BINVARSTART/BINLEN. */ static ModelError_t addTensor__vector_field_time_encoder_sinusoidal_Constant_2_output_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__vector_field_time_encoder_sinusoidal_Constant_2_output_0[] = {1};
VALIDATE(model.addTensor("_vector_field_time_encoder_sinusoidal_Constant_2_output_0", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_time_encoder_sinusoidal_Constant_2_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 3.9215686321258545f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_time_encoder_sinusoidal_Constant_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_time_encoder_sinusoidal_Constant_2_output_0), .dataSize=BINLEN(_vector_field_time_encoder_sinusoidal_Constant_2_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* "_vector_field_time_encoder_sinusoidal_Mul": ElementWiseBinary node with scalar param operation=13 (presumably the converter's multiply opcode, per the ONNX Mul node name -- confirm against QnnOpDef.h) combining the Unsqueeze output with the static constant above; output is {1, 1} uint8 with a very small scale (0.0000003921...). */ static ModelError_t addNode__vector_field_time_encoder_sinusoidal_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_time_encoder_sinusoidal_Mul */ Qnn_Param_t params__vector_field_time_encoder_sinusoidal_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_time_encoder_sinusoidal_Mul[] = { "_vector_field_time_encoder_sinusoidal_Unsqueeze_output_0", "_vector_field_time_encoder_sinusoidal_Constant_2_output_0" }; uint32_t dimensions__vector_field_time_encoder_sinusoidal_Mul_output_0[] = {1, 1}; Qnn_Tensor_t outputs__vector_field_time_encoder_sinusoidal_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_time_encoder_sinusoidal_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_time_encoder_sinusoidal_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_time_encoder_sinusoidal_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_time_encoder_sinusoidal_Mul, // Node Params 1, // Num Node Params inputs__vector_field_time_encoder_sinusoidal_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_time_encoder_sinusoidal_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d": Reshape of the padded depthwise-conv input "_..._dwconv_Pad_output_0_ncf" to {1, 512, 1, 196} -- the "_reshape_to_2d" suffix indicates the converter is expanding a rank-3 conv input to rank 4 so the op can run as a 2-D convolution. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 4,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } /* "_..._dwconv_Conv_reshape_to_2d_nhwc": QNN Transpose with static perm tensor {0, 2, 3, 1} (channel-first {1,512,1,196} -> channel-last {1,1,196,512}); perm is a rank-1, 4-element QNN_DATATYPE_UINT_32 tensor param (dataSize 16 bytes = 4 * sizeof(uint32_t)), quantization undefined since it is integer metadata. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0254803821444511f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors
), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0077738841064274f, .offset= -171}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013071779394522f, .offset= -121}}}, .rank= 1, 
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds the DepthWiseConv2d node for the first ConvNeXt block's dwconv:
 * input NHWC {1,1,196,512}, filter {1,5,1,512}, stride 1x1, dilation 1x1,
 * zero padding -> output {1,1,192,512} (196 - 5 + 1 = 192). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d */
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_stride[] = {2};
  uint32_t _vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 2 x uint32 = 8 bytes
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 2x2 uint32 = 16 bytes
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 2 x uint32 = 8 bytes
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  // Inputs: activation (NHWC), static weight, static bias (registered above).
  const char* inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d[] = {
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight",
    "tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0049912347458303f, .offset= -122}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "DepthWiseConv2d", // Qnn Node Type
    params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d, // Node Params
    3, // Num Node Params
    inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Transpose node with perm {0,3,1,2} that converts the conv output back
 * from NHWC to NCHW layout: {1,1,192,512} -> {1,512,1,192}. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw */
  uint32_t
dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 4 x uint32 = 16 bytes
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw[] = {
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        // Same scale/offset as the Transpose input: layout changes preserve encodings.
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0049912347458303f, .offset= -122}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Reshape node that drops the unit H axis: {1,512,1,192} -> {1,512,192},
 * recovering the original rank-3 Conv1d-style output of the ONNX graph. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate */
  const char* inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate[] = {
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0049912347458303f, .offset= -122}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Adds a Transpose node with perm {0,2,1}: {1,512,192} -> {1,192,512}
 * (channel-last "nfc" layout for the following pointwise ops). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc */
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 3 x uint32 = 12 bytes
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc[] = {
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0049912347458303f, .offset= -122}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates the four attention Unsqueeze outputs of main block 3 along axis 0
 * into a {4,1,128,64} tensor — presumably stacking four per-head slices; confirm
 * against the original ONNX attention subgraph. */
static ModelError_t
addNode__vector_field_main_blocks_3_attn_Concat_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Concat_1 */
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Concat_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Concat_1[] = {
    "_vector_field_main_blocks_3_attn_Unsqueeze_4_output_0",
    "_vector_field_main_blocks_3_attn_Unsqueeze_5_output_0",
    "_vector_field_main_blocks_3_attn_Unsqueeze_6_output_0",
    "_vector_field_main_blocks_3_attn_Unsqueeze_7_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Concat_1_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Concat_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_3_attn_Concat_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_3_attn_Concat_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_3_attn_Concat_1", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_3_attn_Concat_1, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_3_attn_Concat_1, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_3_attn_Concat_1, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates Unsqueeze_8..11 outputs of main block 3 along axis 0
 * into a {4,1,128,64} tensor (same pattern as Concat_1, different encodings). */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Concat_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Concat_2 */
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Concat_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Concat_2[] = {
    "_vector_field_main_blocks_3_attn_Unsqueeze_8_output_0",
    "_vector_field_main_blocks_3_attn_Unsqueeze_9_output_0",
    "_vector_field_main_blocks_3_attn_Unsqueeze_10_output_0",
    "_vector_field_main_blocks_3_attn_Unsqueeze_11_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Concat_2_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Concat_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_3_attn_Concat_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.2249338924884796f, .offset= -125}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_3_attn_Concat_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_3_attn_Concat_2", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_3_attn_Concat_2, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_3_attn_Concat_2, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_3_attn_Concat_2, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates Unsqueeze_4..7 outputs of main block 9 along axis 0 into {4,1,128,64}. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Concat_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Concat_1 */
  Qnn_Param_t params__vector_field_main_blocks_9_attn_Concat_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_9_attn_Concat_1[] = {
    "_vector_field_main_blocks_9_attn_Unsqueeze_4_output_0",
    "_vector_field_main_blocks_9_attn_Unsqueeze_5_output_0",
    "_vector_field_main_blocks_9_attn_Unsqueeze_6_output_0",
    "_vector_field_main_blocks_9_attn_Unsqueeze_7_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Concat_1_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Concat_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_9_attn_Concat_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_9_attn_Concat_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_9_attn_Concat_1", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_9_attn_Concat_1, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_9_attn_Concat_1, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_9_attn_Concat_1, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates Unsqueeze_8..11 outputs of main block 9 along axis 0 into {4,1,128,64}. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Concat_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Concat_2 */
  Qnn_Param_t params__vector_field_main_blocks_9_attn_Concat_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_9_attn_Concat_2[] = {
    "_vector_field_main_blocks_9_attn_Unsqueeze_8_output_0",
    "_vector_field_main_blocks_9_attn_Unsqueeze_9_output_0",
    "_vector_field_main_blocks_9_attn_Unsqueeze_10_output_0",
    "_vector_field_main_blocks_9_attn_Unsqueeze_11_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Concat_2_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Concat_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_9_attn_Concat_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0967830792069435f, .offset= -138}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_9_attn_Concat_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_9_attn_Concat_2", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_9_attn_Concat_2, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_9_attn_Concat_2, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_9_attn_Concat_2, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates Unsqueeze_4..7 outputs of main block 15 along axis 0 into {4,1,128,64}. */
static ModelError_t addNode__vector_field_main_blocks_15_attn_Concat_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Concat_1 */
  Qnn_Param_t params__vector_field_main_blocks_15_attn_Concat_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_15_attn_Concat_1[] = {
    "_vector_field_main_blocks_15_attn_Unsqueeze_4_output_0",
    "_vector_field_main_blocks_15_attn_Unsqueeze_5_output_0",
    "_vector_field_main_blocks_15_attn_Unsqueeze_6_output_0",
    "_vector_field_main_blocks_15_attn_Unsqueeze_7_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Concat_1_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Concat_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_15_attn_Concat_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_15_attn_Concat_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_15_attn_Concat_1", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
params__vector_field_main_blocks_15_attn_Concat_1, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_15_attn_Concat_1, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_15_attn_Concat_1, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates Unsqueeze_8..11 outputs of main block 15 along axis 0 into {4,1,128,64}. */
static ModelError_t addNode__vector_field_main_blocks_15_attn_Concat_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Concat_2 */
  Qnn_Param_t params__vector_field_main_blocks_15_attn_Concat_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_15_attn_Concat_2[] = {
    "_vector_field_main_blocks_15_attn_Unsqueeze_8_output_0",
    "_vector_field_main_blocks_15_attn_Unsqueeze_9_output_0",
    "_vector_field_main_blocks_15_attn_Unsqueeze_10_output_0",
    "_vector_field_main_blocks_15_attn_Unsqueeze_11_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Concat_2_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Concat_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_15_attn_Concat_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.1088553145527840f, .offset= -129}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_15_attn_Concat_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_15_attn_Concat_2", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_15_attn_Concat_2, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_15_attn_Concat_2, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_15_attn_Concat_2, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates Unsqueeze_4..7 outputs of main block 21 along axis 0 into {4,1,128,64}. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Concat_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Concat_1 */
  Qnn_Param_t params__vector_field_main_blocks_21_attn_Concat_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_21_attn_Concat_1[] = {
    "_vector_field_main_blocks_21_attn_Unsqueeze_4_output_0",
    "_vector_field_main_blocks_21_attn_Unsqueeze_5_output_0",
    "_vector_field_main_blocks_21_attn_Unsqueeze_6_output_0",
    "_vector_field_main_blocks_21_attn_Unsqueeze_7_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_Concat_1_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Concat_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_21_attn_Concat_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_21_attn_Concat_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_21_attn_Concat_1", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_21_attn_Concat_1, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_21_attn_Concat_1, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_21_attn_Concat_1, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Concatenates Unsqueeze_8..11 outputs of main block 21 along axis 0 into {4,1,128,64}. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Concat_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Concat_2 */
  Qnn_Param_t params__vector_field_main_blocks_21_attn_Concat_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_21_attn_Concat_2[] = {
    "_vector_field_main_blocks_21_attn_Unsqueeze_8_output_0",
    "_vector_field_main_blocks_21_attn_Unsqueeze_9_output_0",
    "_vector_field_main_blocks_21_attn_Unsqueeze_10_output_0",
    "_vector_field_main_blocks_21_attn_Unsqueeze_11_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_Concat_2_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Concat_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_21_attn_Concat_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0837441459298134f, .offset= -125}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_21_attn_Concat_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_21_attn_Concat_2", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_21_attn_Concat_2, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_21_attn_Concat_2, // Input Tensor Names
    4, // Num Input Tensor Names
    outputs__vector_field_main_blocks_21_attn_Concat_2, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

/* Registers the static {1,32} constant used by the sinusoidal time encoder.
 * Quantized uint8 with scale 1/255 and offset 0; data comes from the model
 * binary via BINVARSTART/BINLEN. */
static ModelError_t addTensor__vector_field_time_encoder_sinusoidal_Constant_3_output_0(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions__vector_field_time_encoder_sinusoidal_Constant_3_output_0[] = {1, 32};
  VALIDATE(model.addTensor("_vector_field_time_encoder_sinusoidal_Constant_3_output_0", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_time_encoder_sinusoidal_Constant_3_output_0",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_time_encoder_sinusoidal_Constant_3_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(_vector_field_time_encoder_sinusoidal_Constant_3_output_0),
                       .dataSize=BINLEN(_vector_field_time_encoder_sinusoidal_Constant_3_output_0)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds an ElementWiseBinary node multiplying the time-encoder Mul output by the
 * Constant_3 tensor above, producing a {1,32} quantized tensor.
 * NOTE(review): operation code 13 — presumably the multiply opcode; confirm
 * against the ElementWiseBinary operation enum in QnnOpDef.h. */
static ModelError_t addNode__vector_field_time_encoder_sinusoidal_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_time_encoder_sinusoidal_Mul_1 */
  Qnn_Param_t params__vector_field_time_encoder_sinusoidal_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_time_encoder_sinusoidal_Mul_1[] = {
    "_vector_field_time_encoder_sinusoidal_Mul_output_0",
    "_vector_field_time_encoder_sinusoidal_Constant_3_output_0"
  };
  uint32_t dimensions__vector_field_time_encoder_sinusoidal_Mul_1_output_0[] = {1, 32};
  Qnn_Tensor_t outputs__vector_field_time_encoder_sinusoidal_Mul_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_time_encoder_sinusoidal_Mul_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_time_encoder_sinusoidal_Mul_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_time_encoder_sinusoidal_Mul_1", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_time_encoder_sinusoidal_Mul_1, // Node Params
    1, // Num Node Params
    inputs__vector_field_time_encoder_sinusoidal_Mul_1, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_time_encoder_sinusoidal_Mul_1, // Output Tensors
    1// Num Output Tensors
  ), err);
  return err;
}

static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_Mul_1 */
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_0_Mul_1[] = { "_vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0049912347458303f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_0_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__vector_field_main_blocks_3_attn_Cast_output_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__vector_field_main_blocks_3_attn_Cast_output_0[] = {1, 192, 1}; VALIDATE(model.addTensor("_vector_field_main_blocks_3_attn_Cast_output_0", // Tensor Name (Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Cast_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.7490196228027344f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Cast_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_3_attn_Cast_output_0), .dataSize=BINLEN(_vector_field_main_blocks_3_attn_Cast_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Div(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Div */ Qnn_Param_t params__vector_field_main_blocks_3_attn_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Div[] = { "_vector_field_main_blocks_3_attn_Cast_output_0", "_vector_field_main_blocks_3_attn_Reshape_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Div_output_0[] = {1, 192, 1}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0267507005482912f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_3_attn_Div, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_3_attn_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__vector_field_main_blocks_3_attn_Cast_1_output_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__vector_field_main_blocks_3_attn_Cast_1_output_0[] = {1, 128, 1}; VALIDATE(model.addTensor("_vector_field_main_blocks_3_attn_Cast_1_output_0", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Cast_1_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.4980392158031464f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Cast_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_3_attn_Cast_1_output_0), .dataSize=BINLEN(_vector_field_main_blocks_3_attn_Cast_1_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Div_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_3_attn_Div_2 */ Qnn_Param_t params__vector_field_main_blocks_3_attn_Div_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Div_2[] = { "_vector_field_main_blocks_3_attn_Cast_1_output_0", "_vector_field_main_blocks_3_attn_Reshape_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Div_2_output_0[] = {1, 128, 1}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Div_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Div_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0160657800734043f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Div_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Div_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_3_attn_Div_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_3_attn_Div_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Div_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_time_encoder_sinusoidal_Sin(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_time_encoder_sinusoidal_Sin */ Qnn_Param_t 
params__vector_field_time_encoder_sinusoidal_Sin[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 14}}}} }; const char* inputs__vector_field_time_encoder_sinusoidal_Sin[] = { "_vector_field_time_encoder_sinusoidal_Mul_1_output_0" }; uint32_t dimensions__vector_field_time_encoder_sinusoidal_Sin_output_0[] = {1, 32}; Qnn_Tensor_t outputs__vector_field_time_encoder_sinusoidal_Sin[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_time_encoder_sinusoidal_Sin_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000003921568634f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_time_encoder_sinusoidal_Sin_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_time_encoder_sinusoidal_Sin", // Node Name "qti.aisw", // Package Name "ElementWiseUnary", // Qnn Node Type params__vector_field_time_encoder_sinusoidal_Sin, // Node Params 1, // Num Node Params inputs__vector_field_time_encoder_sinusoidal_Sin, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_time_encoder_sinusoidal_Sin, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_time_encoder_sinusoidal_Cos(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_time_encoder_sinusoidal_Cos */ Qnn_Param_t params__vector_field_time_encoder_sinusoidal_Cos[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= 
(Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}} }; const char* inputs__vector_field_time_encoder_sinusoidal_Cos[] = { "_vector_field_time_encoder_sinusoidal_Mul_1_output_0" }; uint32_t dimensions__vector_field_time_encoder_sinusoidal_Cos_output_0[] = {1, 32}; Qnn_Tensor_t outputs__vector_field_time_encoder_sinusoidal_Cos[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_time_encoder_sinusoidal_Cos_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_time_encoder_sinusoidal_Cos_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_time_encoder_sinusoidal_Cos", // Node Name "qti.aisw", // Package Name "ElementWiseUnary", // Qnn Node Type params__vector_field_time_encoder_sinusoidal_Cos, // Node Params 1, // Num Node Params inputs__vector_field_time_encoder_sinusoidal_Cos, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_time_encoder_sinusoidal_Cos, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Slice_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Slice_4 */ uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_4_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_3_attn_Slice_4_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 0, 32, 1}; Qnn_Param_t params__vector_field_main_blocks_3_attn_Slice_4[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Slice_4_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_4_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Slice_4_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Slice_4[] = { "_vector_field_main_blocks_3_attn_Concat_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_4_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Slice_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Slice_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, 
.offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Slice_4", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_3_attn_Slice_4, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_3_attn_Slice_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Slice_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Slice_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Slice_5 */ uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_5_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_3_attn_Slice_5_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 32, 64, 1}; Qnn_Param_t params__vector_field_main_blocks_3_attn_Slice_5[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Slice_5_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_5_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Slice_5_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Slice_5[] = { "_vector_field_main_blocks_3_attn_Concat_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_5_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Slice_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Slice_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Slice_5", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_3_attn_Slice_5, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_3_attn_Slice_5, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_3_attn_Slice_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Slice_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Slice_4 */ uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_4_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_9_attn_Slice_4_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 0, 32, 1}; Qnn_Param_t params__vector_field_main_blocks_9_attn_Slice_4[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Slice_4_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_4_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Slice_4_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Slice_4[] = { 
"_vector_field_main_blocks_9_attn_Concat_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_4_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Slice_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Slice_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Slice_4", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_9_attn_Slice_4, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_9_attn_Slice_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Slice_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Slice_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Slice_5 */ uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_5_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_9_attn_Slice_5_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 32, 64, 1}; Qnn_Param_t params__vector_field_main_blocks_9_attn_Slice_5[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_9_attn_Slice_5_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_5_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Slice_5_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Slice_5[] = { "_vector_field_main_blocks_9_attn_Concat_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_5_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Slice_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Slice_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2041020095348358f, .offset= -129}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Slice_5", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_9_attn_Slice_5, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_9_attn_Slice_5, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Slice_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Slice_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Slice_4 */ uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_4_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_15_attn_Slice_4_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 0, 32, 1}; Qnn_Param_t params__vector_field_main_blocks_15_attn_Slice_4[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_4_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_4_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Slice_4_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, 
.name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Slice_4[] = { "_vector_field_main_blocks_15_attn_Concat_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_4_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Slice_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Slice_4", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_15_attn_Slice_4, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_15_attn_Slice_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Slice_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__vector_field_main_blocks_15_attn_Slice_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_Slice_5 */
/* StridedSlice node. "ranges" is a static 4x3 int32 tensor; per QNN StridedSlice
 * convention each row appears to be (begin, end, stride) for one input axis:
 * here 0..4, 0..1, 0..128 and 32..64 -- i.e. the SECOND half of the last (size-64)
 * axis, complementing Slice_4 above. All mask scalars are 0 (unused). */
uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_5_ranges[] = {4, 3};
int32_t _vector_field_main_blocks_15_attn_Slice_5_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 32, 64, 1};
Qnn_Param_t params__vector_field_main_blocks_15_attn_Slice_5[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_5_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_5_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Slice_5_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
/* Same producer tensor as Slice_4; only the ranges differ. */
const char* inputs__vector_field_main_blocks_15_attn_Slice_5[] = { "_vector_field_main_blocks_15_attn_Concat_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_5_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Slice_5[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3399716913700104f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_Slice_5", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__vector_field_main_blocks_15_attn_Slice_5, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_15_attn_Slice_5, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_Slice_5, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* StridedSlice for block 21: ranges row 4 is 0..32, i.e. the FIRST half of the last
 * axis. Definition continues on the next source line. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Slice_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Slice_4 */
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_4_ranges[] = {4, 3};
int32_t _vector_field_main_blocks_21_attn_Slice_4_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 0, 32, 1};
Qnn_Param_t params__vector_field_main_blocks_21_attn_Slice_4[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Slice_4_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_4_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Slice_4_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
/* Mask scalars all 0: slice fully described by the static "ranges" tensor above. */
{.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Slice_4[] = { "_vector_field_main_blocks_21_attn_Concat_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_4_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Slice_4[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Slice_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Slice_4", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Slice_4, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Slice_4, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Slice_4, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Companion StridedSlice for block 21: ranges row 4 is 32..64 (second half of the
 * last axis). Definition continues on the next source line. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Slice_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Slice_5 */
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_5_ranges[] = {4, 3};
int32_t _vector_field_main_blocks_21_attn_Slice_5_ranges[] = {0, 4, 1, 0, 1, 1, 0, 128, 1, 32, 64, 1};
Qnn_Param_t params__vector_field_main_blocks_21_attn_Slice_5[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Slice_5_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_5_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Slice_5_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Slice_5[] = { "_vector_field_main_blocks_21_attn_Concat_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_5_output_0[] = {4, 1, 128, 32};
/* Output shares the same quant encoding as Slice_4 of this block (scale 0.27342, offset -124). */
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Slice_5[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Slice_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2734210491180420f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Slice_5", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Slice_5, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Slice_5, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Slice_5, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static weight tensor (1,1,32), uint8 quantized with offset 0; bytes come from the
 * generated binary blob via BINVARSTART/BINLEN. Presumably the RoPE-style "theta"
 * frequencies for block 3's attention -- confirm against the ONNX source model. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_3_attn_theta(QnnModel& model){
ModelError_t
err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_3_attn_theta[] = {1, 1, 32};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_3_attn_theta", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_3_attn_theta", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0392156876623631f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_3_attn_theta, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_3_attn_theta), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_3_attn_theta)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* ElementWiseBinary node for the ONNX Mul op (operation code 13 here; node name says
 * Mul). Broadcast-multiplies the (1,192,32) Div output by the (1,1,32) theta tensor.
 * Definition continues on the next source line. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Mul[] = { "_vector_field_main_blocks_3_attn_Div_output_0", "tts_ttl_vector_field_main_blocks_3_attn_theta" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_output_0[] = {1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2675070166587830f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Same multiply (operation 13) against theta, but for the (1,128,32) text-side branch
 * (Div_2 output). Definition continues on the next source line. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_7(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_7 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_7[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Mul_7[] = { "_vector_field_main_blocks_3_attn_Div_2_output_0", "tts_ttl_vector_field_main_blocks_3_attn_theta" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_7_output_0[] = {1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_7[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Mul_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1606578081846237f, .offset= 0}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Mul_7", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Mul_7, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Mul_7, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Mul_7, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Concat along axis 1 of the time-encoder's Sin and Cos outputs -> (1, 64) sinusoidal
 * embedding. Definition continues on the next source line. */
static ModelError_t addNode__vector_field_time_encoder_sinusoidal_Concat(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_time_encoder_sinusoidal_Concat */
Qnn_Param_t params__vector_field_time_encoder_sinusoidal_Concat[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
const char* inputs__vector_field_time_encoder_sinusoidal_Concat[] = { "_vector_field_time_encoder_sinusoidal_Sin_output_0", "_vector_field_time_encoder_sinusoidal_Cos_output_0" };
uint32_t dimensions__vector_field_time_encoder_sinusoidal_Concat_output_0[] = {1, 64};
Qnn_Tensor_t outputs__vector_field_time_encoder_sinusoidal_Concat[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_time_encoder_sinusoidal_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 2,
.dimensions=dimensions__vector_field_time_encoder_sinusoidal_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_time_encoder_sinusoidal_Concat", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__vector_field_time_encoder_sinusoidal_Concat, // Node Params
1, // Num Node Params
inputs__vector_field_time_encoder_sinusoidal_Concat, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_time_encoder_sinusoidal_Concat, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static LayerNorm gamma (scale) vector, length 512, uint8 quantized; bytes from the
 * generated binary blob. Consumed by the LayerNormalization node below. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024556671269238f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Static LayerNorm beta (bias) vector, length 512; note the negative quant offset. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021914341486990f, .offset= -133}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* LayerNorm over axis 2 (the 512-channel axis of the (1,192,512) input), with the
 * gamma/beta tensors above and epsilon 1e-6. Definition continues on the next line. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
/* epsilon matches the ONNX LayerNormalization default magnitude used by this model. */
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
/* Inputs: activation to normalize, then gamma (weight) and beta (bias). */
const char* inputs__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_0_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight", "tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
/* Output tensor is named after the following Transpose; presumably that Transpose was
 * folded into this node by the converter -- confirm against the graph dump. */
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0155876735225320f, .offset= -119}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions=
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseUnary with operation code 14 (node name says Sin) applied to the Mul
 * output above -- the sine half of the rotary embedding for block 3. Definition
 * continues on the next source line. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Sin(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Sin */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Sin[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 14}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Sin[] = { "_vector_field_main_blocks_3_attn_Mul_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Sin_output_0[] = {1, 192, 32};
/* Output quantization scale 2/255 with offset -128 covers the sine range [-1, 1]. */
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Sin[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Sin_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078431377187371f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Sin_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Sin", // Node Name
"qti.aisw", // Package Name
"ElementWiseUnary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Sin, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Sin, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Sin, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseUnary with operation code 4 (node name says Cos) on the same Mul output
 * -- the cosine half of the rotary embedding. Same shape/quantization as Sin. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Cos(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Cos */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Cos[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Cos[] = { "_vector_field_main_blocks_3_attn_Mul_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Cos_output_0[] = {1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Cos[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Cos_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078431377187371f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Cos_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Cos", // Node Name
"qti.aisw", // Package Name
"ElementWiseUnary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Cos, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Cos, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Cos, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Sine (operation 14) of the text-branch Mul_7 output, shape (1,128,32). Definition
 * continues on the next source line. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Sin_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Sin_1 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Sin_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 14}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Sin_1[] = { "_vector_field_main_blocks_3_attn_Mul_7_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Sin_1_output_0[] = {1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Sin_1[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Sin_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078431246802211f, .offset= -127}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Sin_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Sin_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseUnary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Sin_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Sin_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Sin_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Cosine (operation 4) of the text-branch Mul_7 output, shape (1,128,32) -- pairs
 * with Sin_1 above. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Cos_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Cos_1 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Cos_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Cos_1[] = { "_vector_field_main_blocks_3_attn_Mul_7_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Cos_1_output_0[] = {1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Cos_1[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Cos_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078431367874146f, .offset= -127}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Cos_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Cos_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseUnary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Cos_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Cos_1, // Input Tensor Names
1,
// Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Cos_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static Gemm weight for the time-encoder MLP layer 0, pre-transposed by the converter
 * (name suffix "_permute") to (256, 64); uint8 quantized, bytes from the binary blob. */
static ModelError_t addTensor_tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute[] = {256, 64};
VALIDATE(model.addTensor("tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019370688823983f, .offset= -138}}}, .rank= 2, .dimensions=dimensions_tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute), .dataSize=BINLEN(tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Static Gemm bias for the same layer, length 256. Definition continues on the next
 * source line. */
static ModelError_t addTensor_tts_ttl_vector_field_time_encoder_mlp_0_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_time_encoder_mlp_0_linear_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_vector_field_time_encoder_mlp_0_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_time_encoder_mlp_0_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0003305177378934f, .offset= -197}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ttl_vector_field_time_encoder_mlp_0_linear_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_time_encoder_mlp_0_linear_bias),
                                              .dataSize=BINLEN(tts_ttl_vector_field_time_encoder_mlp_0_linear_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0,
                                                             .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// FullyConnected (converted from ONNX Gemm): sinusoidal time embedding x permuted
// weight + bias -> 1x256 uint8 activation. No node params; 3 inputs, 1 output.
static ModelError_t addNode__vector_field_time_encoder_mlp_mlp_0_linear_Gemm(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_time_encoder_mlp_mlp_0_linear_Gemm */
  const char* inputs__vector_field_time_encoder_mlp_mlp_0_linear_Gemm[] = {
    "_vector_field_time_encoder_sinusoidal_Concat_output_0",
    "tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute",
    "tts_ttl_vector_field_time_encoder_mlp_0_linear_bias"
  };
  uint32_t dimensions__vector_field_time_encoder_mlp_mlp_0_linear_Gemm_output_0[] = {1, 256};
  Qnn_Tensor_t outputs__vector_field_time_encoder_mlp_mlp_0_linear_Gemm[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_time_encoder_mlp_mlp_0_linear_Gemm_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0074023571796715f, .offset= -123}}},
            .rank= 2,
            .dimensions=dimensions__vector_field_time_encoder_mlp_mlp_0_linear_Gemm_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_time_encoder_mlp_mlp_0_linear_Gemm", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_time_encoder_mlp_mlp_0_linear_Gemm, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_time_encoder_mlp_mlp_0_linear_Gemm, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose with static perm={0,2,1} (axis swap of the last two dims) after the
// block-0 convnext norm; perm is a rank-1 uint32 tensor param (3 elems, 12 bytes).
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf_perm,
                           .dataSize=12}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    "_vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0"
  };
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0155876735225320f, .offset= -119}}},
            .rank= 3,
            .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseNeuron (scalar "operation"=7; node name indicates Softplus) with
// beta=1.0, threshold=-1.0; 1x256 output reusing the Gemm output's quant encoding.
static ModelError_t addNode__vector_field_time_encoder_mlp_mlp_1_Softplus(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_time_encoder_mlp_mlp_1_Softplus */
  Qnn_Param_t params__vector_field_time_encoder_mlp_mlp_1_Softplus[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="beta",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 7}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="threshold",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = -1.000000000000f}}}}
  };
  const char* inputs__vector_field_time_encoder_mlp_mlp_1_Softplus[] = {
    "_vector_field_time_encoder_mlp_mlp_0_linear_Gemm_output_0"
  };
  uint32_t dimensions__vector_field_time_encoder_mlp_mlp_1_Softplus_output_0[] = {1, 256};
  Qnn_Tensor_t outputs__vector_field_time_encoder_mlp_mlp_1_Softplus[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_time_encoder_mlp_mlp_1_Softplus_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0074023571796715f, .offset= -123}}},
            .rank= 2,
            .dimensions=dimensions__vector_field_time_encoder_mlp_mlp_1_Softplus_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_time_encoder_mlp_mlp_1_Softplus", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseNeuron", // Qnn Node Type
                         params__vector_field_time_encoder_mlp_mlp_1_Softplus, // Node Params
                         3, // Num Node Params
                         inputs__vector_field_time_encoder_mlp_mlp_1_Softplus, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_time_encoder_mlp_mlp_1_Softplus, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13; node names say Mul): blocks_3 Slice_4 x shared
// cos table. Looks like rotary-embedding application — TODO confirm against model.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_10 */
  Qnn_Param_t
params__vector_field_main_blocks_3_attn_Mul_10[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Mul_10[] = {
    "_vector_field_main_blocks_3_attn_Slice_4_output_0",
    "_vector_field_main_blocks_3_attn_Cos_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_10_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_10[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Mul_10_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_10_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Mul_10", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_3_attn_Mul_10, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Mul_10, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Mul_10, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_3 Slice_5 x shared sin table; 4x1x128x32 uint8.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_11(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_11 */
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_11[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Mul_11[] = {
    "_vector_field_main_blocks_3_attn_Slice_5_output_0",
    "_vector_field_main_blocks_3_attn_Sin_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_11_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_11[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Mul_11_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0827288106083870f, .offset= -88}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_11_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Mul_11", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_3_attn_Mul_11, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Mul_11, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Mul_11, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_3 Slice_4 x shared sin table.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_12(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_12 */
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_12[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value =
13}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Mul_12[] = {
    "_vector_field_main_blocks_3_attn_Slice_4_output_0",
    "_vector_field_main_blocks_3_attn_Sin_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_12_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_12[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Mul_12_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1046580076217651f, .offset= -113}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_12_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Mul_12", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_3_attn_Mul_12, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Mul_12, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Mul_12, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_3 Slice_5 x shared cos table.
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_13(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_13 */
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_13[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Mul_13[] = {
    "_vector_field_main_blocks_3_attn_Slice_5_output_0",
    "_vector_field_main_blocks_3_attn_Cos_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_13_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_13[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_3_attn_Mul_13_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1400965601205826f, .offset= -119}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_13_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_3_attn_Mul_13", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_3_attn_Mul_13, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_3_attn_Mul_13, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_3_attn_Mul_13, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_9 Slice_4 x the blocks_3 cos table
// (cos/sin tables are computed once in block 3 and reused by later attn blocks).
static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_10 */
  Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_10[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_9_attn_Mul_10[] = {
    "_vector_field_main_blocks_9_attn_Slice_4_output_0",
"_vector_field_main_blocks_3_attn_Cos_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Mul_10_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Mul_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1925131529569626f, .offset= -136}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Mul_10", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Mul_10, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Mul_10, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Mul_10, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_11(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_11 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_11[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Mul_11[] = { "_vector_field_main_blocks_9_attn_Slice_5_output_0", "_vector_field_main_blocks_3_attn_Sin_1_output_0" }; uint32_t 
dimensions__vector_field_main_blocks_9_attn_Mul_11_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_11[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Mul_11_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1346857100725174f, .offset= -120}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_11_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Mul_11", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Mul_11, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Mul_11, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Mul_11, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_12(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_12 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_12[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Mul_12[] = { "_vector_field_main_blocks_9_attn_Slice_4_output_0", "_vector_field_main_blocks_3_attn_Sin_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Mul_12_output_0[] = {4, 1, 128, 32}; 
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_12[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Mul_12_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1250589191913605f, .offset= -159}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_12_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Mul_12", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_9_attn_Mul_12, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Mul_12, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Mul_12, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_9 Slice_5 x the blocks_3 cos table.
static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_13(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_13 */
  Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_13[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_9_attn_Mul_13[] = {
    "_vector_field_main_blocks_9_attn_Slice_5_output_0",
    "_vector_field_main_blocks_3_attn_Cos_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Mul_13_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_13[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_9_attn_Mul_13_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.1892906725406647f, .offset= -119}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_13_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_9_attn_Mul_13", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_9_attn_Mul_13, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_9_attn_Mul_13, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_9_attn_Mul_13, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_15 Slice_4 x the blocks_3 cos table.
static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_10 */
  Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_10[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_15_attn_Mul_10[] = {
    "_vector_field_main_blocks_15_attn_Slice_4_output_0",
    "_vector_field_main_blocks_3_attn_Cos_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_10_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_10[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name=
"_vector_field_main_blocks_15_attn_Mul_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2671738266944885f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Mul_10", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Mul_10, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Mul_10, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Mul_10, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_11(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_11 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_11[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Mul_11[] = { "_vector_field_main_blocks_15_attn_Slice_5_output_0", "_vector_field_main_blocks_3_attn_Sin_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_11_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_11[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Mul_11_output_0", .type= 
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1761469691991806f, .offset= -164}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_11_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Mul_11", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Mul_11, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Mul_11, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Mul_11, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_12(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_12 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_12[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Mul_12[] = { "_vector_field_main_blocks_15_attn_Slice_4_output_0", "_vector_field_main_blocks_3_attn_Sin_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_12_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_12[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Mul_12_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2388384193181992f, .offset= -150}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_12_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_15_attn_Mul_12", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_15_attn_Mul_12, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_15_attn_Mul_12, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_15_attn_Mul_12, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_15 Slice_5 x the blocks_3 cos table.
static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_13(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_13 */
  Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_13[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_15_attn_Mul_13[] = {
    "_vector_field_main_blocks_15_attn_Slice_5_output_0",
    "_vector_field_main_blocks_3_attn_Cos_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_13_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_13[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_15_attn_Mul_13_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= {
QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.2539382278919220f, .offset= -115}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_13_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr,
                           .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                             .hybridCoo= {.numSpecifiedElements= 0,
                                          .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_15_attn_Mul_13", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_15_attn_Mul_13, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_15_attn_Mul_13, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_15_attn_Mul_13, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary (operation=13): blocks_21 Slice_4 x the blocks_3 cos table.
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_10(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_10 */
  Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_10[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_21_attn_Mul_10[] = {
    "_vector_field_main_blocks_21_attn_Slice_4_output_0",
    "_vector_field_main_blocks_3_attn_Cos_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_10_output_0[] = {4, 1, 128, 32};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_10[] = {
    (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_21_attn_Mul_10_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
/* Continuation: uint8 quantization encoding and node registration for
 * _vector_field_main_blocks_21_attn_Mul_10 (header above this span). */
{.scaleOffsetEncoding= {.scale= 0.2342285960912704f, .offset= -128}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_10_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_10", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_10, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_10, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_10, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers ElementWiseBinary (operation 13) on block-21 slice 5 and the shared block-3 sin
 * table. Output {4,1,128,32}, uint8 asymmetric quantization. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_11(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_11 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_11[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Mul_11[] = {
"_vector_field_main_blocks_21_attn_Slice_5_output_0",
"_vector_field_main_blocks_3_attn_Sin_1_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_11_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_11[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_21_attn_Mul_11_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.2116852700710297f, .offset= -145}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_11_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_11", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_11, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_11, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_11, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers ElementWiseBinary (operation 13) on block-21 slice 4 and the shared block-3 sin
 * table. Output {4,1,128,32}, uint8 asymmetric quantization. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_12(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_12 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_12[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Mul_12[] = {
"_vector_field_main_blocks_21_attn_Slice_4_output_0",
"_vector_field_main_blocks_3_attn_Sin_1_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_12_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_12[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_21_attn_Mul_12_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.1612825244665146f, .offset= -161}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_12_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_12", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_12, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_12, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_12, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers ElementWiseBinary (operation 13) on block-21 slice 5 and the shared block-3 cos
 * table; tensor descriptor continues past this span. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_13(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_13 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_13[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Mul_13[] = {
"_vector_field_main_blocks_21_attn_Slice_5_output_0",
"_vector_field_main_blocks_3_attn_Cos_1_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_13_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_13[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_21_attn_Mul_13_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.2578404545783997f, .offset= -117}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_13_output_0,
/* Continuation: finish _vector_field_main_blocks_21_attn_Mul_13's output tensor and
 * register the node (definition header above this span). */
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_13", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_13, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_13, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_13, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshape that lifts the ConvNeXt pointwise-conv input to 4-D {1,512,1,192} so the 1x1
 * convolution below can run as Conv2d (the converter lowers 1-D pointwise convs this way). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf"
};
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0155876735225320f, .offset= -119}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NCHW -> NHWC transpose (perm {0,2,3,1}) feeding the Conv2d; quantization encoding is
 * carried through unchanged (same scale/offset as its input). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm,
.dataSize=16}}, // 4 x uint32
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0155876735225320f, .offset= -119}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Static weight tensor for the ConvNeXt block-0 pwconv1 1x1 conv; payload streamed from the
 * accompanying binary blob via BINVARSTART/BINLEN (declaration continues past this span). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t
/* Continuation of addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight
 * (header above this span): HWIO {1,1,512,1024} uint8 weights, data in the binary section. */
dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0081992130726576f, .offset= -116}}},
.rank= 4,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

/* Static bias tensor {1024} for pwconv1, quantized uint8 with its own scale/offset
 * (bias_bitwidth=8 per the converter command line in the file header). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0021177134476602f, .offset= -229}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

/* Registers pwconv1 as Conv2d: 1x1 kernel, stride {1,1}, dilation {1,1}, zero padding,
 * group=1 — i.e. a plain pointwise convolution over the reshaped NHWC activation.
 * Param list continues past this span (pad_amount/stride/group/reuse_sparse_indices). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_dilation",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_dilation,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_dilation,
.dataSize=8}}, // 2 x uint32
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0,
/* Continuation of the Conv_2d parameter list (dilation param opened above this span),
 * followed by pad_amount {0,0,0,0}, stride {1,1}, group=1 and reuse_sparse_indices=false. */
.numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_pad_amount,
.dataSize=16}}, // 2x2 uint32
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="stride",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_stride",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_stride,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d_stride,
.dataSize=8}}, // 2 x uint32
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam=
(Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d[] = {
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight",
"tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias"
};
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0575120970606804f, .offset= -169}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NHWC -> NCHW transpose that undoes the layout change after the Conv2d
 * (function body continues past this span). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR
_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw */
/* perm {0,3,1,2}: NHWC {1,1,192,1024} back to NCHW {1,1024,1,192}. */
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw_perm,
.dataSize=16}}, // 4 x uint32
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0575120970606804f, .offset= -169}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Final Reshape dropping the singleton H axis: {1,1024,1,192} -> {1,1024,192}, producing the
 * original ONNX Conv output "_..._pwconv1_Conv_output_0". */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate[] = {
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0575120970606804f,
.offset= -169}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers ElementWiseNeuron (operation 8 — presumably Tanh, matching the node name;
 * confirm against QnnOpDef.h) applied after the time-encoder MLP Softplus. Output {1,256}.
 * Body continues past this span. */
static ModelError_t addNode__vector_field_time_encoder_mlp_mlp_1_Tanh(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_time_encoder_mlp_mlp_1_Tanh */
Qnn_Param_t params__vector_field_time_encoder_mlp_mlp_1_Tanh[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 8}}}}
};
const char* inputs__vector_field_time_encoder_mlp_mlp_1_Tanh[] = {
"_vector_field_time_encoder_mlp_mlp_1_Softplus_output_0"
};
uint32_t dimensions__vector_field_time_encoder_mlp_mlp_1_Tanh_output_0[] = {1, 256};
Qnn_Tensor_t outputs__vector_field_time_encoder_mlp_mlp_1_Tanh[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_time_encoder_mlp_mlp_1_Tanh_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0057766721583903f, .offset= -125}}},
.rank= 2,
/* Continuation: finish the Tanh output tensor and register the ElementWiseNeuron node
 * (function header above this span). */
.dimensions=dimensions__vector_field_time_encoder_mlp_mlp_1_Tanh_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_time_encoder_mlp_mlp_1_Tanh", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__vector_field_time_encoder_mlp_mlp_1_Tanh, // Node Params
1, // Num Node Params
inputs__vector_field_time_encoder_mlp_mlp_1_Tanh, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_time_encoder_mlp_mlp_1_Tanh, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers ElementWiseBinary (operation 18 — presumably subtract, per the node name;
 * confirm against QnnOpDef.h): Mul_10 - Mul_11 for block 3. Output {4,1,128,32}. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Sub_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Sub_1 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Sub_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Sub_1[] = {
"_vector_field_main_blocks_3_attn_Mul_10_output_0",
"_vector_field_main_blocks_3_attn_Mul_11_output_0"
};
uint32_t dimensions__vector_field_main_blocks_3_attn_Sub_1_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Sub_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_3_attn_Sub_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_3_attn_Sub_1_output_0,
.memType=
QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Sub_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Sub_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Sub_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Sub_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers ElementWiseBinary (operation 0 — presumably add): Mul_12 + Mul_13 for block 3.
 * Output {4,1,128,32}. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Add_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Add_3 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Add_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_3_attn_Add_3[] = {
"_vector_field_main_blocks_3_attn_Mul_12_output_0",
"_vector_field_main_blocks_3_attn_Mul_13_output_0"
};
uint32_t dimensions__vector_field_main_blocks_3_attn_Add_3_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Add_3[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_3_attn_Add_3_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.1478076428174973f, .offset= -126}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_3_attn_Add_3_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Add_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Add_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Add_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Add_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Block-9 analogue of _..._3_attn_Sub_1 (operation 18); body continues past this span. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Sub_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Sub_1 */
Qnn_Param_t params__vector_field_main_blocks_9_attn_Sub_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}}
};
const char* inputs__vector_field_main_blocks_9_attn_Sub_1[] = {
"_vector_field_main_blocks_9_attn_Mul_10_output_0",
"_vector_field_main_blocks_9_attn_Mul_11_output_0"
};
uint32_t dimensions__vector_field_main_blocks_9_attn_Sub_1_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Sub_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_9_attn_Sub_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.1941199451684952f, .offset= -135}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_9_attn_Sub_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0,
/* Continuation: close _vector_field_main_blocks_9_attn_Sub_1's output tensor and register
 * the node (function header above this span). */
.numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Sub_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Sub_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Sub_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Sub_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Block-9 analogue of _..._3_attn_Add_3 (ElementWiseBinary, operation 0). */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Add_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Add_3 */
Qnn_Param_t params__vector_field_main_blocks_9_attn_Add_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_9_attn_Add_3[] = {
"_vector_field_main_blocks_9_attn_Mul_12_output_0",
"_vector_field_main_blocks_9_attn_Mul_13_output_0"
};
uint32_t dimensions__vector_field_main_blocks_9_attn_Add_3_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Add_3[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_9_attn_Add_3_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.1931796520948410f, .offset= -120}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_9_attn_Add_3_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Add_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Add_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Add_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Add_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Block-15 analogue of _..._3_attn_Sub_1 (ElementWiseBinary, operation 18). */
static ModelError_t addNode__vector_field_main_blocks_15_attn_Sub_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_Sub_1 */
Qnn_Param_t params__vector_field_main_blocks_15_attn_Sub_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}}
};
const char* inputs__vector_field_main_blocks_15_attn_Sub_1[] = {
"_vector_field_main_blocks_15_attn_Mul_10_output_0",
"_vector_field_main_blocks_15_attn_Mul_11_output_0"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_Sub_1_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Sub_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_15_attn_Sub_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.2880974411964417f, .offset= -107}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_15_attn_Sub_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_Sub_1", // Node Name
"qti.aisw",
// Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_15_attn_Sub_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_15_attn_Sub_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_Sub_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Block-15 analogue of _..._3_attn_Add_3 (ElementWiseBinary, operation 0). */
static ModelError_t addNode__vector_field_main_blocks_15_attn_Add_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_Add_3 */
Qnn_Param_t params__vector_field_main_blocks_15_attn_Add_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_15_attn_Add_3[] = {
"_vector_field_main_blocks_15_attn_Mul_12_output_0",
"_vector_field_main_blocks_15_attn_Mul_13_output_0"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_Add_3_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Add_3[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_15_attn_Add_3_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.3009932339191437f, .offset= -137}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_15_attn_Add_3_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_Add_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_15_attn_Add_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_15_attn_Add_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_Add_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Block-21 analogue of _..._3_attn_Sub_1 (ElementWiseBinary, operation 18);
 * function runs past the end of this span. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Sub_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Sub_1 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Sub_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Sub_1[] = {
"_vector_field_main_blocks_21_attn_Mul_10_output_0",
"_vector_field_main_blocks_21_attn_Mul_11_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Sub_1_output_0[] = {4, 1, 128, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Sub_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_21_attn_Sub_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.2340422123670578f, .offset= -124}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_21_attn_Sub_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Sub_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Sub_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Sub_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Sub_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_Add_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Add_3 */ Qnn_Param_t params__vector_field_main_blocks_21_attn_Add_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_21_attn_Add_3[] = { "_vector_field_main_blocks_21_attn_Mul_12_output_0", "_vector_field_main_blocks_21_attn_Mul_13_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Add_3_output_0[] = {4, 1, 128, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Add_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Add_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2532473504543304f, .offset= -119}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Add_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Add_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_21_attn_Add_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_21_attn_Add_3, // Input Tensor Names 2, // Num Input 
Tensor Names outputs__vector_field_main_blocks_21_attn_Add_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_time_encoder_mlp_mlp_1_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_time_encoder_mlp_mlp_1_Mul */ Qnn_Param_t params__vector_field_time_encoder_mlp_mlp_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_time_encoder_mlp_mlp_1_Mul[] = { "_vector_field_time_encoder_mlp_mlp_0_linear_Gemm_output_0", "_vector_field_time_encoder_mlp_mlp_1_Tanh_output_0" }; uint32_t dimensions__vector_field_time_encoder_mlp_mlp_1_Mul_output_0[] = {1, 256}; Qnn_Tensor_t outputs__vector_field_time_encoder_mlp_mlp_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_time_encoder_mlp_mlp_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028935084119439f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_time_encoder_mlp_mlp_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_time_encoder_mlp_mlp_1_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_time_encoder_mlp_mlp_1_Mul, // Node Params 1, // Num Node Params inputs__vector_field_time_encoder_mlp_mlp_1_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_time_encoder_mlp_mlp_1_Mul, // Output 
// Tensors  (tail of the "// Output Tensors" comment split at the chunk boundary; the code
// below completes the VALIDATE call of addNode__vector_field_time_encoder_mlp_mlp_1_Mul)
1// Num Output Tensors
), err);
return err;
}

/* Concat over axis 3: joins block 3's "_attn_Sub_1" and "_attn_Add_3" halves
   ({4,1,128,32} each) into a uint8 {4,1,128,64} tensor. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Concat_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Concat_4 */
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Concat_4[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Concat_4[] = {
    "_vector_field_main_blocks_3_attn_Sub_1_output_0",
    "_vector_field_main_blocks_3_attn_Add_3_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Concat_4_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Concat_4[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_3_attn_Concat_4_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_3_attn_Concat_4_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_3_attn_Concat_4", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_3_attn_Concat_4, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_3_attn_Concat_4, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_3_attn_Concat_4, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Same axis-3 Concat, for block 9. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Concat_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Concat_4 */
  Qnn_Param_t params__vector_field_main_blocks_9_attn_Concat_4[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_9_attn_Concat_4[] = {
    "_vector_field_main_blocks_9_attn_Sub_1_output_0",
    "_vector_field_main_blocks_9_attn_Add_3_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Concat_4_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Concat_4[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_9_attn_Concat_4_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.2050409615039825f, .offset= -128}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_9_attn_Concat_4_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_9_attn_Concat_4", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_9_attn_Concat_4, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_9_attn_Concat_4, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_9_attn_Concat_4, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Same axis-3 Concat, for block 15. */
static ModelError_t addNode__vector_field_main_blocks_15_attn_Concat_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Concat_4 */
  Qnn_Param_t params__vector_field_main_blocks_15_attn_Concat_4[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_15_attn_Concat_4[] = {
    "_vector_field_main_blocks_15_attn_Sub_1_output_0",
    "_vector_field_main_blocks_15_attn_Add_3_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Concat_4_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Concat_4[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_15_attn_Concat_4_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.3289195895195007f, .offset= -125}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_15_attn_Concat_4_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_15_attn_Concat_4", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_15_attn_Concat_4, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_15_attn_Concat_4, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_15_attn_Concat_4, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Same axis-3 Concat, for block 21. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Concat_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Concat_4 */
  Qnn_Param_t params__vector_field_main_blocks_21_attn_Concat_4[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_21_attn_Concat_4[] = {
    "_vector_field_main_blocks_21_attn_Sub_1_output_0",
    "_vector_field_main_blocks_21_attn_Add_3_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_Concat_4_output_0[] = {4, 1, 128, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Concat_4[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_21_attn_Concat_4_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.2532473504543304f, .offset= -119}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_21_attn_Concat_4_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_21_attn_Concat_4", // Node Name
    "qti.aisw", // Package Name
    "Concat", // Qnn Node Type
    params__vector_field_main_blocks_21_attn_Concat_4, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_21_attn_Concat_4, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_21_attn_Concat_4, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Static (pre-transposed) weight tensor for the time-encoder MLP's final linear layer;
   the raw uint8 data is linked in via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute[] = {64, 256};
  VALIDATE(model.addTensor("tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0013426447985694f, .offset= -157}}},
        .rank= 2,
        .dimensions=dimensions_tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute),
                       .dataSize=BINLEN(tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

/* Static bias tensor for the same linear layer (definition continues into the next chunk). */
static ModelError_t addTensor_tts_ttl_vector_field_time_encoder_mlp_2_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_time_encoder_mlp_2_linear_bias[] = {64};
  VALIDATE(model.addTensor("tts_ttl_vector_field_time_encoder_mlp_2_linear_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_time_encoder_mlp_2_linear_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0002597324782982f, .offset= -190}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_vector_field_time_encoder_mlp_2_linear_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= {
// (continuation of addTensor_tts_ttl_vector_field_time_encoder_mlp_2_linear_bias:
//  static data resolved from the linked binary blob via BINVARSTART/BINLEN)
.data=BINVARSTART(tts_ttl_vector_field_time_encoder_mlp_2_linear_bias),
.dataSize=BINLEN(tts_ttl_vector_field_time_encoder_mlp_2_linear_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

/* FullyConnected node realizing the ONNX Gemm of the time-encoder MLP's last layer:
   {1,256} activation x {64,256} permuted weight + {64} bias -> uint8 {1,64}. */
static ModelError_t addNode__vector_field_time_encoder_mlp_mlp_2_linear_Gemm(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_time_encoder_mlp_mlp_2_linear_Gemm */
  const char* inputs__vector_field_time_encoder_mlp_mlp_2_linear_Gemm[] = {
    "_vector_field_time_encoder_mlp_mlp_1_Mul_output_0",
    "tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute",
    "tts_ttl_vector_field_time_encoder_mlp_2_linear_bias"
  };
  uint32_t dimensions__vector_field_time_encoder_mlp_mlp_2_linear_Gemm_output_0[] = {1, 64};
  Qnn_Tensor_t outputs__vector_field_time_encoder_mlp_mlp_2_linear_Gemm[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_time_encoder_mlp_mlp_2_linear_Gemm_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0024919810239226f, .offset= -135}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_time_encoder_mlp_mlp_2_linear_Gemm_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_time_encoder_mlp_mlp_2_linear_Gemm", // Node Name
    "qti.aisw", // Package Name
    "FullyConnected", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_time_encoder_mlp_mlp_2_linear_Gemm, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_time_encoder_mlp_mlp_2_linear_Gemm, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Transpose of block 3's concatenated attention tensor with static perm {0,1,3,2}
   (swaps the last two axes: {4,1,128,64} -> {4,1,64,128}). */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Transpose */
  uint32_t dimensions__vector_field_main_blocks_3_attn_Transpose_perm[] = {4};
  uint32_t _vector_field_main_blocks_3_attn_Transpose_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__vector_field_main_blocks_3_attn_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_3_attn_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_3_attn_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 4 x uint32 perm entries = 16 bytes.
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Transpose_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_3_attn_Transpose[] = {
    "_vector_field_main_blocks_3_attn_Concat_4_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_3_attn_Transpose_output_0[] = {4, 1, 64, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_3_attn_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        // Same scale/offset as the Concat input — transpose does not requantize.
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.1861971020698547f, .offset= -135}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_3_attn_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_3_attn_Transpose", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_3_attn_Transpose, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_3_attn_Transpose, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_3_attn_Transpose, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Same perm {0,1,3,2} Transpose, for block 9. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Transpose */
  uint32_t dimensions__vector_field_main_blocks_9_attn_Transpose_perm[] = {4};
  uint32_t _vector_field_main_blocks_9_attn_Transpose_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__vector_field_main_blocks_9_attn_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_9_attn_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_9_attn_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Transpose_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_9_attn_Transpose[] = {
    "_vector_field_main_blocks_9_attn_Concat_4_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_9_attn_Transpose_output_0[] = {4, 1, 64, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_9_attn_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.2050409615039825f, .offset= -128}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_9_attn_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_9_attn_Transpose", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_9_attn_Transpose, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_9_attn_Transpose, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_9_attn_Transpose, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Same perm {0,1,3,2} Transpose, for block 15 (definition continues into the next chunk). */
static ModelError_t addNode__vector_field_main_blocks_15_attn_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Transpose */
  uint32_t dimensions__vector_field_main_blocks_15_attn_Transpose_perm[] = {4};
  uint32_t _vector_field_main_blocks_15_attn_Transpose_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__vector_field_main_blocks_15_attn_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_15_attn_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_15_attn_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Transpose_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_15_attn_Transpose[] = {
    "_vector_field_main_blocks_15_attn_Concat_4_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_15_attn_Transpose_output_0[] = {4, 1, 64, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_15_attn_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.3289195895195007f, .offset= -125}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_15_attn_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
// (continuation of addNode__vector_field_main_blocks_15_attn_Transpose's VALIDATE call,
//  opened at the end of the previous chunk)
"_vector_field_main_blocks_15_attn_Transpose", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_15_attn_Transpose, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_15_attn_Transpose, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_Transpose, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Same perm {0,1,3,2} Transpose, for block 21: {4,1,128,64} -> {4,1,64,128}. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Transpose */
  uint32_t dimensions__vector_field_main_blocks_21_attn_Transpose_perm[] = {4};
  uint32_t _vector_field_main_blocks_21_attn_Transpose_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__vector_field_main_blocks_21_attn_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_21_attn_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_21_attn_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 4 x uint32 perm entries = 16 bytes.
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Transpose_perm, .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_21_attn_Transpose[] = {
    "_vector_field_main_blocks_21_attn_Concat_4_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_Transpose_output_0[] = {4, 1, 64, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_21_attn_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.2532473504543304f, .offset= -119}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_21_attn_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_21_attn_Transpose", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_21_attn_Transpose, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_21_attn_Transpose, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_21_attn_Transpose, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* ONNX Unsqueeze lowered to a QNN Reshape: {1,64} -> {1,64,1}; quantization params are
   carried over unchanged from the Gemm output. */
static ModelError_t addNode__vector_field_time_encoder_Unsqueeze(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_time_encoder_Unsqueeze */
  const char* inputs__vector_field_time_encoder_Unsqueeze[] = {
    "_vector_field_time_encoder_mlp_mlp_2_linear_Gemm_output_0"
  };
  uint32_t dimensions__vector_field_time_encoder_Unsqueeze_output_0[] = {1, 64, 1};
  Qnn_Tensor_t outputs__vector_field_time_encoder_Unsqueeze[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_time_encoder_Unsqueeze_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0024919810239226f, .offset= -135}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_time_encoder_Unsqueeze_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_time_encoder_Unsqueeze", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_time_encoder_Unsqueeze, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_time_encoder_Unsqueeze, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Rank-3 Transpose with static perm {0,2,1}: {1,64,1} -> {1,1,64}, feeding the time
   embedding into main block 1. */
static ModelError_t addNode__vector_field_main_blocks_1_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_1_Transpose */
  uint32_t dimensions__vector_field_main_blocks_1_Transpose_perm[] = {3};
  uint32_t _vector_field_main_blocks_1_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_1_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_1_Transpose_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_1_Transpose_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         // 3 x uint32 perm entries = 12 bytes.
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_1_Transpose_perm, .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_1_Transpose[] = {
    "_vector_field_time_encoder_Unsqueeze_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_1_Transpose_output_0[] = {1, 1, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_1_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_1_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0024919810239226f, .offset= -135}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_1_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_1_Transpose", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_1_Transpose, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_1_Transpose, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_1_Transpose, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* ElementWiseNeuron (operation=1, value emitted by the converter — confirm meaning in
   QnnOpDef.h) fused from block 0's ConvNeXt activation; output tensor keeps the original
   "_act_Mul_1" name. */
static ModelError_t addNode__elementwiseneuron_56(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _elementwiseneuron_56 */
  Qnn_Param_t params__elementwiseneuron_56[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__elementwiseneuron_56[] = {
    "_vector_field_main_blocks_0_convnext_0_pwconv1_Conv_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192};
  Qnn_Tensor_t outputs__elementwiseneuron_56[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_0_convnext_0_act_Mul_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0199973694980145f, .offset= -8}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_act_Mul_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_elementwiseneuron_56", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseNeuron", // Qnn Node Type
    params__elementwiseneuron_56, // Node Params
    1, // Num Node Params
    inputs__elementwiseneuron_56, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__elementwiseneuron_56, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

/* Reshape {1,1024,192} -> {1,1024,1,192} ahead of pwconv2's Conv
   (definition continues past this chunk). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d */
  const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
    "_vector_field_main_blocks_0_convnext_0_act_Mul_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199973694980145f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199973694980145f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node 
Params inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0072790025733411f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018405253067613f, .offset= -142}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                           ), err);
  return err;
}

/* 1x1 Conv2d realizing block-0 pwconv2: stride {1, 1}, dilation {1, 1}, no
 * padding, group=1; inputs are the layout-transposed activation plus the static
 * weight {1, 1, 1024, 512} and bias {512} tensors registered above. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d */
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias" };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0971632599830627f, .offset= -163}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

/* Layout transpose with perm {0, 3, 1, 2} of the Conv2d result; the remainder
 * of this definition continues on the following source lines. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate" };
  uint32_t
dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0971632599830627f, .offset= -163}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t 
dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0971632599830627f, .offset= -163}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, 
.name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0971632599830627f, .offset= -163}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3095(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3095[] = {512, 64}; VALIDATE(model.addTensor("onnx__MatMul_3095", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3095", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0084837963804603f, .offset= -147}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3095, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3095), .dataSize=BINLEN(onnx__MatMul_3095)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_1_linear_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_1_linear_linear_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_1_linear_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_1_linear_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009913644753397f, .offset= -118}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_1_linear_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_1_linear_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_1_linear_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_1_linear_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_1_linear_linear_MatMul */ const char* inputs__vector_field_main_blocks_1_linear_linear_MatMul[] = { "_vector_field_main_blocks_1_Transpose_output_0", "onnx__MatMul_3095", "tts_ttl_vector_field_main_blocks_1_linear_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_1_linear_linear_Add_output_0_fc[] = {1, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_1_linear_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_1_linear_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050606243312359f, .offset= -111}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_1_linear_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_1_linear_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_1_linear_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_1_linear_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_1_linear_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_1_linear_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_1_linear_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_1_linear_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_1_linear_linear_Add_output_0[] = {1, 1, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_1_linear_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_1_linear_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050606243312359f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_1_linear_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_1_linear_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num 
Node Params inputs__vector_field_main_blocks_1_linear_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_1_linear_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3140(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3140[] = {512, 64}; VALIDATE(model.addTensor("onnx__MatMul_3140", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3140", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040237051434815f, .offset= -122}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3140, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3140), .dataSize=BINLEN(onnx__MatMul_3140)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_7_linear_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_7_linear_linear_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_7_linear_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_7_linear_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010562057141215f, .offset= -132}}}, .rank= 1, 
      /* (continuation of the bias_7 tensor literal started above) */
      .dimensions=dimensions_tts_ttl_vector_field_main_blocks_7_linear_linear_bias,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_7_linear_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_7_linear_linear_bias)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  ), err);
  return err;
}

/* Adds the FullyConnected node for block-7's linear layer:
   [1,64] activation x [512,64] weight + [512] bias -> [1,512].
   Inputs are referenced by tensor name; the [1,512] output is restored
   to rank 3 by the companion *_post_reshape node below. */
static ModelError_t addNode__vector_field_main_blocks_7_linear_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_7_linear_linear_MatMul */
  const char* inputs__vector_field_main_blocks_7_linear_linear_MatMul[] = {
    "_vector_field_main_blocks_1_Transpose_output_0",
    "onnx__MatMul_3140",
    "tts_ttl_vector_field_main_blocks_7_linear_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_7_linear_linear_Add_output_0_fc[] = {1, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_7_linear_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_7_linear_linear_Add_output_0_fc",
      .type= QNN_TENSOR_TYPE_NATIVE,               /* intermediate, backend-owned */
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0034357297699898f, .offset= -151}}},
      .rank= 2, .dimensions=dimensions__vector_field_main_blocks_7_linear_linear_Add_output_0_fc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_7_linear_linear_MatMul", /* Node Name */
    "qti.aisw", /* Package Name */
    "FullyConnected", /* Qnn Node Type */
    nullptr, /* Node Params */
    0, /* Num Node Params */
    inputs__vector_field_main_blocks_7_linear_linear_MatMul, /* Input Tensor Names */
    3, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_7_linear_linear_MatMul, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Reshape [1,512] -> [1,1,512]: restores the rank the converter removed for
   the FullyConnected op. Quantization params are copied from the input. */
static ModelError_t addNode__vector_field_main_blocks_7_linear_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_7_linear_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_7_linear_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_7_linear_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_7_linear_linear_Add_output_0[] = {1, 1, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_7_linear_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_7_linear_linear_Add_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0034357297699898f, .offset= -151}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_7_linear_linear_Add_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_7_linear_linear_MatMul_post_reshape", /* Node Name */
    "qti.aisw", /* Package Name */
    "Reshape", /* Qnn Node Type */
    nullptr, /* Node Params */
    0, /* Num Node Params */
    inputs__vector_field_main_blocks_7_linear_linear_MatMul_post_reshape, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_7_linear_linear_MatMul_post_reshape, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Registers the static weight tensor "onnx__MatMul_3185" (512x64, uint8),
   consumed by block-13's FullyConnected node. */
static ModelError_t addTensor_onnx__MatMul_3185(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3185[] = {512, 64};
  VALIDATE(model.addTensor("onnx__MatMul_3185", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "onnx__MatMul_3185",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0067614624276757f, .offset= -99}}},
      .rank= 2, .dimensions=dimensions_onnx__MatMul_3185,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3185), .dataSize=BINLEN(onnx__MatMul_3185)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the static bias tensor for block-13's linear layer
   (512 elements, uint8). NOTE: the tensor literal continues on the
   following source line. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_13_linear_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_13_linear_linear_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_13_linear_linear_bias", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "tts_ttl_vector_field_main_blocks_13_linear_linear_bias",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0015248142881319f, .offset= -59}}},
      .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_13_linear_linear_bias,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_13_linear_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_13_linear_linear_bias)}},
      /* (continuation of the bias_13 tensor literal started above) */
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  ), err);
  return err;
}

/* FullyConnected node for block-13's linear layer:
   activation x "onnx__MatMul_3185" + bias_13 -> [1,512] intermediate. */
static ModelError_t addNode__vector_field_main_blocks_13_linear_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_13_linear_linear_MatMul */
  const char* inputs__vector_field_main_blocks_13_linear_linear_MatMul[] = {
    "_vector_field_main_blocks_1_Transpose_output_0",
    "onnx__MatMul_3185",
    "tts_ttl_vector_field_main_blocks_13_linear_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_13_linear_linear_Add_output_0_fc[] = {1, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_13_linear_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_13_linear_linear_Add_output_0_fc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0016950658755377f, .offset= -159}}},
      .rank= 2, .dimensions=dimensions__vector_field_main_blocks_13_linear_linear_Add_output_0_fc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_13_linear_linear_MatMul", /* Node Name */
    "qti.aisw", /* Package Name */
    "FullyConnected", /* Qnn Node Type */
    nullptr, /* Node Params */
    0, /* Num Node Params */
    inputs__vector_field_main_blocks_13_linear_linear_MatMul, /* Input Tensor Names */
    3, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_13_linear_linear_MatMul, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Reshape [1,512] -> [1,1,512] after block-13's FullyConnected;
   quantization parameters are carried over unchanged. */
static ModelError_t
addNode__vector_field_main_blocks_13_linear_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_13_linear_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_13_linear_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_13_linear_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_13_linear_linear_Add_output_0[] = {1, 1, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_13_linear_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_13_linear_linear_Add_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0016950658755377f, .offset= -159}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_13_linear_linear_Add_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_13_linear_linear_MatMul_post_reshape", /* Node Name */
    "qti.aisw", /* Package Name */
    "Reshape", /* Qnn Node Type */
    nullptr, /* Node Params */
    0, /* Num Node Params */
    inputs__vector_field_main_blocks_13_linear_linear_MatMul_post_reshape, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_13_linear_linear_MatMul_post_reshape, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Registers the static weight tensor "onnx__MatMul_3230" (512x64, uint8),
   consumed by block-19's FullyConnected node. NOTE: the tensor literal
   continues on the following source line. */
static ModelError_t addTensor_onnx__MatMul_3230(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3230[] = {512, 64};
  VALIDATE(model.addTensor("onnx__MatMul_3230", /* Tensor Name */
    /* (continuation of the "onnx__MatMul_3230" addTensor call above) */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "onnx__MatMul_3230",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0056106755509973f, .offset= -109}}},
      .rank= 2, .dimensions=dimensions_onnx__MatMul_3230,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3230), .dataSize=BINLEN(onnx__MatMul_3230)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers the static bias tensor for block-19's linear layer
   (512 elements, uint8). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_19_linear_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_19_linear_linear_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_19_linear_linear_bias", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "tts_ttl_vector_field_main_blocks_19_linear_linear_bias",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0010970339644700f, .offset= -85}}},
      .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_19_linear_linear_bias,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_19_linear_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_19_linear_linear_bias)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  ), err);
  return err;
}

/* FullyConnected node for block-19's linear layer:
   activation x "onnx__MatMul_3230" + bias_19 -> [1,512] intermediate. */
static ModelError_t addNode__vector_field_main_blocks_19_linear_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_19_linear_linear_MatMul */
  const char* inputs__vector_field_main_blocks_19_linear_linear_MatMul[] = {
    "_vector_field_main_blocks_1_Transpose_output_0",
    "onnx__MatMul_3230",
    "tts_ttl_vector_field_main_blocks_19_linear_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_19_linear_linear_Add_output_0_fc[] = {1, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_19_linear_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_19_linear_linear_Add_output_0_fc",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0041620451956987f, .offset= -146}}},
      .rank= 2, .dimensions=dimensions__vector_field_main_blocks_19_linear_linear_Add_output_0_fc,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_19_linear_linear_MatMul", /* Node Name */
    "qti.aisw", /* Package Name */
    "FullyConnected", /* Qnn Node Type */
    nullptr, /* Node Params */
    0, /* Num Node Params */
    inputs__vector_field_main_blocks_19_linear_linear_MatMul, /* Input Tensor Names */
    3, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_19_linear_linear_MatMul, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Reshape [1,512] -> [1,1,512] after block-19's FullyConnected.
   NOTE: the body of this function continues on the following source line. */
static ModelError_t addNode__vector_field_main_blocks_19_linear_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_19_linear_linear_MatMul_post_reshape */
  const char*
  /* (continuation of addNode__vector_field_main_blocks_19_linear_linear_MatMul_post_reshape) */
  inputs__vector_field_main_blocks_19_linear_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_19_linear_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_19_linear_linear_Add_output_0[] = {1, 1, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_19_linear_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_19_linear_linear_Add_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0041620451956987f, .offset= -146}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_19_linear_linear_Add_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_19_linear_linear_MatMul_post_reshape", /* Node Name */
    "qti.aisw", /* Package Name */
    "Reshape", /* Qnn Node Type */
    nullptr, /* Node Params */
    0, /* Num Node Params */
    inputs__vector_field_main_blocks_19_linear_linear_MatMul_post_reshape, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_19_linear_linear_MatMul_post_reshape, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Registers the static per-channel scale ("gamma") tensor of ConvNeXt
   block 0 as a [1,1,512] uint8 tensor (offset 0 — non-negative values). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_gamma(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_gamma[] = {1, 1, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_0_gamma", /* Tensor Name */
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_0_gamma",
      .type= QNN_TENSOR_TYPE_STATIC,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0016322587616742f, .offset= 0}}},
      .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_0_gamma,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_0_gamma)}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  ), err);
  return err;
}

/* ElementWiseBinary node: gamma [1,1,512] (broadcast) * pwconv2 output
   [1,192,512]. operation=13 — presumably the multiply opcode; confirm
   against QnnOpDef.h for this SDK version. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_Mul_2 */
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_0_Mul_2[] = {
    "tts_ttl_vector_field_main_blocks_0_convnext_0_gamma",
    "_vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_Mul_2_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_0_convnext_0_Mul_2_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0168111305683851f, .offset= -163}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_Mul_2_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_0_convnext_0_Mul_2", /* Node Name */
    "qti.aisw", /* Package Name */
    "ElementWiseBinary", /* Qnn Node Type */
    params__vector_field_main_blocks_0_convnext_0_Mul_2, /* Node Params */
    1, /* Num Node Params */
    inputs__vector_field_main_blocks_0_convnext_0_Mul_2, /* Input Tensor Names */
    2, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_0_convnext_0_Mul_2, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Transpose node for block 1: perm {0,2,1} swaps the last two axes
   ([1,1,512] -> [1,512,1]). NOTE: the body continues on the following
   source line. */
static ModelError_t addNode__vector_field_main_blocks_1_Transpose_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_1_Transpose_1 */
  uint32_t dimensions__vector_field_main_blocks_1_Transpose_1_perm[] = {3};
  uint32_t _vector_field_main_blocks_1_Transpose_1_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_1_Transpose_1[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
       .id=0, .name= "_vector_field_main_blocks_1_Transpose_1_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,                 /* perm data itself is unquantized */
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1, .dimensions=dimensions__vector_field_main_blocks_1_Transpose_1_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_1_Transpose_1_perm, .dataSize=12}}, /* 3 x uint32 */
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
       /* (continuation of the Transpose_1 "perm" parameter tensor) */
       .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_1_Transpose_1[] = {
    "_vector_field_main_blocks_1_linear_linear_Add_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_1_Transpose_1_output_0[] = {1, 512, 1};
  Qnn_Tensor_t outputs__vector_field_main_blocks_1_Transpose_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_1_Transpose_1_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0050606243312359f, .offset= -111}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_1_Transpose_1_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_1_Transpose_1", /* Node Name */
    "qti.aisw", /* Package Name */
    "Transpose", /* Qnn Node Type */
    params__vector_field_main_blocks_1_Transpose_1, /* Node Params */
    1, /* Num Node Params */
    inputs__vector_field_main_blocks_1_Transpose_1, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_1_Transpose_1, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Transpose node for block 7: perm {0,2,1}, [1,1,512] -> [1,512,1].
   Output quantization matches its producer (scale 0.0034357..., offset -151).
   NOTE: the trailing addNode arguments continue on the following source line. */
static ModelError_t addNode__vector_field_main_blocks_7_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_7_Transpose */
  uint32_t dimensions__vector_field_main_blocks_7_Transpose_perm[] = {3};
  uint32_t _vector_field_main_blocks_7_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_7_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
       .id=0, .name= "_vector_field_main_blocks_7_Transpose_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1, .dimensions=dimensions__vector_field_main_blocks_7_Transpose_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_7_Transpose_perm, .dataSize=12}}, /* 3 x uint32 */
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_7_Transpose[] = {
    "_vector_field_main_blocks_7_linear_linear_Add_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_7_Transpose_output_0[] = {1, 512, 1};
  Qnn_Tensor_t outputs__vector_field_main_blocks_7_Transpose[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_7_Transpose_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0034357297699898f, .offset= -151}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_7_Transpose_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_7_Transpose", /* Node Name */
    "qti.aisw", /* Package Name */
    "Transpose", /* Qnn Node Type */
    params__vector_field_main_blocks_7_Transpose, /* Node Params */
    1, /* Num Node Params */
    /* (continuation of the block-7 Transpose addNode argument list) */
    inputs__vector_field_main_blocks_7_Transpose, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_7_Transpose, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Transpose node for block 13: perm {0,2,1}, [1,1,512] -> [1,512,1]. */
static ModelError_t addNode__vector_field_main_blocks_13_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_13_Transpose */
  uint32_t dimensions__vector_field_main_blocks_13_Transpose_perm[] = {3};
  uint32_t _vector_field_main_blocks_13_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_13_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
       .id=0, .name= "_vector_field_main_blocks_13_Transpose_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1, .dimensions=dimensions__vector_field_main_blocks_13_Transpose_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_13_Transpose_perm, .dataSize=12}}, /* 3 x uint32 */
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_13_Transpose[] = {
    "_vector_field_main_blocks_13_linear_linear_Add_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_13_Transpose_output_0[] = {1, 512, 1};
  Qnn_Tensor_t outputs__vector_field_main_blocks_13_Transpose[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_13_Transpose_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0016950658755377f, .offset= -159}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_13_Transpose_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_13_Transpose", /* Node Name */
    "qti.aisw", /* Package Name */
    "Transpose", /* Qnn Node Type */
    params__vector_field_main_blocks_13_Transpose, /* Node Params */
    1, /* Num Node Params */
    inputs__vector_field_main_blocks_13_Transpose, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_13_Transpose, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Transpose node for block 19: perm {0,2,1}, [1,1,512] -> [1,512,1]. */
static ModelError_t addNode__vector_field_main_blocks_19_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_19_Transpose */
  uint32_t dimensions__vector_field_main_blocks_19_Transpose_perm[] = {3};
  uint32_t _vector_field_main_blocks_19_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_19_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
       .id=0, .name= "_vector_field_main_blocks_19_Transpose_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1, .dimensions=dimensions__vector_field_main_blocks_19_Transpose_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_19_Transpose_perm, .dataSize=12}}, /* 3 x uint32 */
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_19_Transpose[] = {
    "_vector_field_main_blocks_19_linear_linear_Add_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_19_Transpose_output_0[] = {1, 512, 1};
  Qnn_Tensor_t outputs__vector_field_main_blocks_19_Transpose[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_19_Transpose_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0041620451956987f, .offset= -146}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_19_Transpose_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_19_Transpose", /* Node Name */
    "qti.aisw", /* Package Name */
    "Transpose", /* Qnn Node Type */
    params__vector_field_main_blocks_19_Transpose, /* Node Params */
    1, /* Num Node Params */
    inputs__vector_field_main_blocks_19_Transpose, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_19_Transpose, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* ElementWiseBinary node for ConvNeXt block 0. operation=0 — presumably
   the add opcode; confirm against QnnOpDef.h. NOTE: the body continues on
   the following source line. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_Add */
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_0_Add[] = {
    /* (continuation of the convnext_0_Add input list) */
    "_vector_field_main_blocks_0_convnext_0_Mul_output_0",
    "_vector_field_main_blocks_0_convnext_0_Mul_2_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_Add_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_0_convnext_0_Add_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0255164969712496f, .offset= -123}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_Add_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_0_convnext_0_Add", /* Node Name */
    "qti.aisw", /* Package Name */
    "ElementWiseBinary", /* Qnn Node Type */
    params__vector_field_main_blocks_0_convnext_0_Add, /* Node Params */
    1, /* Num Node Params */
    inputs__vector_field_main_blocks_0_convnext_0_Add, /* Input Tensor Names */
    2, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_0_convnext_0_Add, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* ElementWiseBinary node: masks the block-0 residual sum with the
   broadcast "latent_mask" model input ([1,1,192] per the converter command
   line; operation=13 — presumably multiply, confirm against QnnOpDef.h).
   Output quantization equals the input's (mask is pass-through/zero). */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_0_Mul_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_0_Mul_3 */
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_0_Mul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_0_Mul_3[] = {
    "_vector_field_main_blocks_0_convnext_0_Add_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_0_Mul_3_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_0_Mul_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_0_convnext_0_Mul_3_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0255164969712496f, .offset= -123}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_0_Mul_3_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_0_convnext_0_Mul_3", /* Node Name */
    "qti.aisw", /* Package Name */
    "ElementWiseBinary", /* Qnn Node Type */
    params__vector_field_main_blocks_0_convnext_0_Mul_3, /* Node Params */
    1, /* Num Node Params */
    inputs__vector_field_main_blocks_0_convnext_0_Mul_3, /* Input Tensor Names */
    2, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_0_convnext_0_Mul_3, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Second latent_mask multiply feeding ConvNeXt block-0's depthwise conv.
   NOTE: the body continues on the following source line. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_Mul */
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_1_Mul[] = {
    "_vector_field_main_blocks_0_convnext_0_Mul_3_output_0",
    "latent_mask"
  /* (continuation of addNode__vector_field_main_blocks_0_convnext_1_Mul) */
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_1_Mul_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_0_convnext_1_Mul_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0255164969712496f, .offset= -123}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_Mul_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_0_convnext_1_Mul", /* Node Name */
    "qti.aisw", /* Package Name */
    "ElementWiseBinary", /* Qnn Node Type */
    params__vector_field_main_blocks_0_convnext_1_Mul, /* Node Params */
    1, /* Num Node Params */
    inputs__vector_field_main_blocks_0_convnext_1_Mul, /* Input Tensor Names */
    2, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_0_convnext_1_Mul, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Pad node ahead of block-0's depthwise conv: pads the middle (sequence)
   axis by 4 on each side ([1,192,512] -> [1,200,512]); pad_amount is a
   [3,2] row-per-axis table. scheme=3 — presumably a constant/edge pad
   scheme; confirm the enum value in QnnOpDef.h for this SDK. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Pad */
  uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_pad_amount[] = {3, 2};
  uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 4, 4, 0, 0};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_dwconv_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
       .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Pad_pad_amount",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_pad_amount,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}}, /* 6 x uint32 */
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad[] = {
    "_vector_field_main_blocks_0_convnext_1_Mul_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0[] = {1, 200, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
        {.scaleOffsetEncoding= {.scale= 0.0255164969712496f, .offset= -123}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */
    "_vector_field_main_blocks_0_convnext_1_dwconv_Pad", /* Node Name */
    "qti.aisw", /* Package Name */
    "Pad", /* Qnn Node Type */
    params__vector_field_main_blocks_0_convnext_1_dwconv_Pad, /* Node Params */
    2, /* Num Node Params */
    inputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad, /* Input Tensor Names */
    1, /* Num Input Tensor Names */
    outputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad, /* Output Tensors */
    1  /* Num Output Tensors */
  ), err);
  return err;
}

/* Transpose wrapper moving the padded tensor to channel-first layout for
   the depthwise conv (perm {0,2,1}). NOTE: this function runs past the end
   of this source chunk and continues on the following source line. */
static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf */
  uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
       .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf_perm",
       .type= QNN_TENSOR_TYPE_STATIC,
       .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
       .dataType= QNN_DATATYPE_UINT_32,
       .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
         {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
       .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf_perm,
       .memType= QNN_TENSORMEMTYPE_RAW,
       {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, /* 3 x uint32 */
       .isDynamicDimensions= nullptr,
       .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
       .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf[] = {
    "_vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0"
  };
  uint32_t
dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 512, 200}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255164969712496f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 200}; 
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255164969712496f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 200, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255164969712496f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055315550416708f, .offset= -105}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t 
dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012870505452156f, .offset= -116}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_dilation[] = {1, 2}; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight", "tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047503174282610f, .offset= -91}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d, // Node Params 
3, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047503174282610f, .offset= -91}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047503174282610f, .offset= -91}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047503174282610f, .offset= -91}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc", // Node Name 
"qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_Mul_1[] = { "_vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047503174282610f, .offset= -91}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_0_convnext_1_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0045848293229938f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias[] = {512}; 
/*
 * NOTE(review): auto-generated by qnn-onnx-converter (see file header). Do not
 * hand-edit; regenerate from the source ONNX model instead.
 *
 * This span: (a) finishes registering the static uint8 LayerNorm bias tensor
 * (scale/offset quantized, data resolved via BINVARSTART/BINLEN from the
 * companion .bin); (b) adds the "LayerNorm" node over axis 2 with
 * epsilon = 1e-6, inputs = {activation, weight, bias}, producing a quantized
 * uint8 [1,192,512] output; (c) adds a Transpose node with perm {0,2,1} that
 * returns the LayerNorm output to channel-first [1,512,192]. Quantization
 * params (scale 0.0274583..., offset -124) are carried unchanged through the
 * Transpose, as expected for a layout-only op.
 */
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0054148356430233f, .offset= -199}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_0_convnext_1_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight", "tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0274583231657743f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn 
Node Type params__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0" }; uint32_t 
dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0274583231657743f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf" }; uint32_t 
/*
 * NOTE(review): auto-generated converter output — do not hand-edit.
 *
 * This span lowers the ONNX pointwise Conv (pwconv1) onto QNN's 4-D Conv2d:
 * Reshape [1,512,192] -> [1,512,1,192], then Transpose perm {0,2,3,1} to NHWC
 * [1,1,192,512]. It then registers the static pwconv1 weight tensor
 * (HWIO {1,1,512,1024} — a 1x1 kernel, 512 in / 1024 out channels) and bias
 * {1024}, both uint8 scale/offset quantized with payloads located via
 * BINVARSTART/BINLEN, and begins the Conv_2d node's parameter tables.
 * Layout-only ops (Reshape/Transpose) keep the producer's quantization
 * encoding (scale 0.0274583..., offset -124) unchanged.
 */
dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0274583231657743f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t 
params__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0274583231657743f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0111033124849200f, .offset= -145}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static 
ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013895325828344f, .offset= -228}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t 
/*
 * NOTE(review): auto-generated converter output — do not hand-edit.
 *
 * This span emits the pwconv1 Conv2d node and the layout ops that undo the
 * NHWC lowering: Conv2d with dilation {1,1}, pad {0,0,0,0}, stride {1,1},
 * group 1 (i.e. a plain 1x1 pointwise conv) over NHWC [1,1,192,512] ->
 * [1,1,192,1024]; then Transpose perm {0,3,1,2} back to NCHW [1,1024,1,192];
 * then Reshape to the 3-D activation [1,1024,192]. All intermediate tensors
 * share the Conv output's quantization (scale 0.0620905..., offset -179).
 * Param buffer sizes are element-count * sizeof(uint32_t): dilation/stride
 * 2 elems -> 8 bytes, pad_amount 2x2 -> 16 bytes, perm 4 elems -> 16 bytes.
 */
params__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight", "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0620905160903931f, .offset= -179}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0620905160903931f, .offset= -179}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); 
return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0620905160903931f, .offset= -179}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
/*
 * NOTE(review): auto-generated converter output — do not hand-edit.
 *
 * This span adds the activation between pwconv1 and pwconv2 plus the pwconv2
 * NHWC lowering: an ElementWiseNeuron node with scalar operation=1 applied to
 * the pwconv1 output (the ONNX name "_..._act_Mul_1_output_0" suggests this
 * is the converter's mapping of the ConvNeXt activation — presumably GELU;
 * confirm operation code 1 against the QNN ElementWiseNeuron op definition);
 * then Reshape [1,1024,192] -> [1,1024,1,192] and Transpose perm {0,2,3,1}
 * to NHWC [1,1,192,1024]; finally the static pwconv2 weight tensor
 * (HWIO {1,1,1024,512} — 1x1 kernel projecting 1024 -> 512 channels) is
 * registered with BINVARSTART/BINLEN payload.
 */
_elementwiseneuron_2 */ Qnn_Param_t params__elementwiseneuron_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_2[] = { "_vector_field_main_blocks_0_convnext_1_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0192324388772249f, .offset= -9}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_2", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_2, // Node Params 1, // Num Node Params inputs__elementwiseneuron_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_1_act_Mul_1_output_0" }; 
uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0192324388772249f, .offset= -9}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t 
params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0192324388772249f, .offset= -9}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144929094240069f, .offset= -139}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static 
/*
 * NOTE(review): auto-generated converter output — do not hand-edit.
 *
 * This span registers the static pwconv2 bias tensor {512} (uint8
 * scale/offset quantized, BINVARSTART/BINLEN payload) and emits the pwconv2
 * Conv2d node: dilation {1,1}, pad {0,0,0,0}, stride {1,1}, group 1 — a 1x1
 * pointwise conv over NHWC [1,1,192,1024] producing [1,1,192,512] (scale
 * 0.0469842..., offset -69). The trailing function (Transpose back to NCHW,
 * perm {0,3,1,2}) continues past the end of this chunk.
 */
ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015666594263166f, .offset= -186}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t 
params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight", "tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0469842217862606f, .offset= -69}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0469842217862606f, .offset= -69}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return
err; } /* Reshape NCHW {1,512,1,192} -> rank-3 {1,512,192}; drops the singleton height axis, no node params. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0469842217862606f, .offset= -69}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose {1,512,192} -> feature-last {1,192,512} via perm {0,2,1}. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /*
ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0469842217862606f,
.offset= -69}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Registers the static ConvNeXt gamma (layer-scale) tensor {1,1,512}, u8 scale/offset quantized with offset 0; payload from binary blob. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_1_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018330411985517f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_1_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* ElementWiseBinary with operation=13 (converter lowering of the ONNX Mul, per the node name): gamma * pwconv2 output, both {.,192,512}-shaped or broadcastable. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_Mul_2[] = { "tts_ttl_vector_field_main_blocks_0_convnext_1_gamma", "_vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164309870451689f, .offset= -61}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_Mul_2, // Input Tensor Names 2, // Num Input
Tensor Names outputs__vector_field_main_blocks_0_convnext_1_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } /* ElementWiseBinary with operation=0 (converter lowering of the ONNX Add, per the node name): residual add of Mul_output_0 (produced earlier in the file) and Mul_2_output_0. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_Add */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_Add[] = { "_vector_field_main_blocks_0_convnext_1_Mul_output_0", "_vector_field_main_blocks_0_convnext_1_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255169961601496f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_Add, // Input Tensor Names 2, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_1_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } /* ElementWiseBinary multiply (operation=13) of the Add output with graph input "latent_mask" ({1,1,192} per the converter command line — presumably broadcast across the 512 features; confirm against backend broadcast rules). Output keeps the Add's quant params. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_1_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_1_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_1_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_1_Mul_3[] = { "_vector_field_main_blocks_0_convnext_1_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_1_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_1_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_1_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255169961601496f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_1_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_1_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_1_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_1_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_1_Mul_3, // Output
Tensors 1// Num Output Tensors ), err); return err; } /* Entry of convnext_2: multiplies the previous block's masked output by "latent_mask" again (operation=13); quant params pass through unchanged. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_Mul */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_Mul[] = { "_vector_field_main_blocks_0_convnext_1_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255169961601496f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t
/* Pad node for the depthwise conv: pad_amount rows {0,0},{8,8},{0,0} grow the sequence axis 192 -> 208 (scheme=3, as emitted by the converter), matching the declared output dims below. */ addNode__vector_field_main_blocks_0_convnext_2_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 8, 8, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad[] = { "_vector_field_main_blocks_0_convnext_2_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0[] = {1, 208, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255169961601496f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose {1,208,512} -> channel-first {1,512,208} via perm {0,2,1}. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 512, 208}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255169961601496f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Reshape {1,512,208} -> rank-4 {1,512,1,208}, inserting a singleton height axis for the following *_dwconv_Conv_2d node. */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 208}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255169961601496f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static
/* Transpose NCHW {1,512,1,208} -> NHWC {1,1,208,512} via perm {0,2,3,1}, producing the layout the dwconv Conv2d node consumes. */ ModelError_t addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 208, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255169961601496f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Registers the static depthwise-conv weight tensor {1,5,1,512} (u8 scale/offset quantized); payload from binary blob. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036493334919214f, .offset= -136}}}, .rank= 4,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* Registers the static depthwise-conv bias tensor {512} (u8 scale/offset quantized); payload from binary blob. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008619832224213f, .offset= -119}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* Depthwise Conv2d node for convnext_2 (definition continues beyond this chunk). */ static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_dilation[] = {1, 4}; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight", "tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044187130406499f, .offset= -105}}}, .rank= 
4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw_perm, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044187130406499f, .offset= -105}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num 
Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044187130406499f, .offset= -105}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err 
= MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0044187130406499f, .offset= -105}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_Mul_1[] = { "_vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044187130406499f, .offset= -105}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027302077505738f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight), 
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020413273014128f, .offset= -116}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t 
params__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_0_convnext_2_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight", "tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346459262073040f, 
.offset= -91}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346459262073040f, .offset= -91}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346459262073040f, .offset= -91}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); 
return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= 
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346459262073040f, .offset= -91}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0155376261100173f, .offset= -135}}}, .rank= 4, 
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015929563669488f, .offset= -202}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_dilation[] 
= {2}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight", "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0656912922859192f, .offset= -166}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0656912922859192f, .offset= -166}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name 
"Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0656912922859192f, .offset= -166}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node 
Params inputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_4 */ Qnn_Param_t params__elementwiseneuron_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_4[] = { "_vector_field_main_blocks_0_convnext_2_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0235552061349154f, .offset= -7}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_4", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_4, // Node Params 1, // Num Node Params inputs__elementwiseneuron_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_4, // Output Tensors 1// Num Output Tensors ), err); return err; } 
static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_2_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0235552061349154f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0235552061349154f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0249228384345770f, .offset= -141}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008179445867427f, .offset= -123}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t 
dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_pad_amount, 
.dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight", "tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0456186160445213f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0456186160445213f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params 
inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0456186160445213f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0456186160445213f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_2_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_2_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017076089279726f, .offset= 0}}}, .rank= 3, 
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_2_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_2_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_2_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_Mul_2[] = { "tts_ttl_vector_field_main_blocks_0_convnext_2_gamma", "_vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0176051184535027f, .offset= -132}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t 
Version "_vector_field_main_blocks_0_convnext_2_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_Add */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_Add[] = { "_vector_field_main_blocks_0_convnext_2_Mul_output_0", "_vector_field_main_blocks_0_convnext_2_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255311224609613f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_0_convnext_2_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_2_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_2_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_2_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_2_Mul_3[] = { "_vector_field_main_blocks_0_convnext_2_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_2_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_2_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_2_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255311224609613f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_2_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_2_Mul_3", // Node Name 
"qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_2_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_2_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_2_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_Mul */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_Mul[] = { "_vector_field_main_blocks_0_convnext_2_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255311224609613f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type 
params__vector_field_main_blocks_0_convnext_3_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 16, 16, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad[] = { "_vector_field_main_blocks_0_convnext_3_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0[] = {1, 224, 512}; 
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255311224609613f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 512, 224}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255311224609613f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // 
Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 224}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255311224609613f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 224, 512}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0255311224609613f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0031552824657410f, .offset= -136}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007779928855598f, .offset= -133}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d(QnnModel& model){ 
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_dilation[] = {1, 8}; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, 
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight", "tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043183201923966f, .offset= -86}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043183201923966f, .offset= -86}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num 
Node Params inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043183201923966f, .offset= -86}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names 
// Tail of the preceding addNode(...) call — its definition begins before these lines.
outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,2,1}) of the depthwise-conv output to a feature-last
// {1,192,512} uint8 tensor (scale 0.0043183..., offset -86).
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc */
// Static 3-element "perm" parameter tensor for the Transpose op.
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043183201923966f, .offset= -86}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary (operation=13) applying "latent_mask" to the transposed
// dwconv output; the result keeps the input's quantization encoding.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_Mul_1[] = { "_vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043183201923966f, .offset= -86}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_3_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Static LayerNorm weight tensor: {512} uint8 (scale 0.0030111..., offset 0).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030111456289887f, .offset= 0}}}, .rank= 1,
// (continuation of addTensor_..._norm_norm_weight; weight bytes come from the
// linked model binary via BINVARSTART/BINLEN)
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Static LayerNorm bias tensor: {512} uint8 (scale 0.0024365..., offset -142),
// data from the linked model binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024365950375795f, .offset= -142}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// LayerNorm node: normalizes the masked {1,192,512} tensor over axis 2 with
// epsilon 1e-6, taking the static norm weight/bias tensors as inputs 2 and 3.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_0_convnext_3_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight", "tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0220355931669474f, .offset= -99}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,2,1}) of the LayerNorm output to channel-first {1,512,192}.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
// (continuation of the _..._norm_Transpose_1_output_0_ncf "perm" param tensor)
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0220355931669474f, .offset= -99}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape {1,512,192} -> {1,512,1,192}: inserts a unit height axis so the
// following pointwise conv (pwconv1) can run as a 2-D convolution.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0220355931669474f, .offset= -99}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,2,3,1}) to channel-last {1,1,192,512} ahead of the pwconv1 Conv2d.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
// (continuation: output tensor + node registration for the pwconv1 NHWC transpose)
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0220355931669474f, .offset= -99}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Static pwconv1 weights: {1,1,512,1024} uint8 (scale 0.0099700..., offset -134),
// data from the linked model binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0099700344726443f, .offset= -134}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Static pwconv1 bias: {1024} uint8 (scale 0.0012407..., offset -221),
// data from the linked model binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012407404137775f, .offset= -221}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Next function's signature continues on the following line (pwconv1 Conv2d node).
static ModelError_t
// 1x1 Conv2d (pointwise, pwconv1): 512 -> 1024 channels, stride {1,1},
// dilation {1,1}, zero padding, group 1. (Return type is on the previous line.)
addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight", "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0581794492900372f, .offset= -154}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,3,1,2}) of the Conv2d output {1,1,192,1024} back to
// channel-first {1,1024,1,192}.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t
// (continuation: "perm" param for ..._pwconv1_Conv_intermediate_nchw; declaration keyword is on the previous line)
params__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0581794492900372f, .offset= -154}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape {1,1024,1,192} -> {1,1024,192}: drops the unit height axis, yielding
// the pwconv1 output tensor "_..._pwconv1_Conv_output_0".
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0581794492900372f, .offset= -154}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseNeuron (operation=1) activation on the pwconv1 output
// (named after the ONNX "_..._act_Mul_1" node); requantizes to
// scale 0.0237220..., offset -7.
static ModelError_t addNode__elementwiseneuron_6(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_6 */
Qnn_Param_t params__elementwiseneuron_6[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_6[] = { "_vector_field_main_blocks_0_convnext_3_pwconv1_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0237220413982868f, .offset= -7}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_6", // Node Name
"qti.aisw", // Package Name
// (continuation of the _elementwiseneuron_6 addNode call; the previous line
// ends inside its "// Package Name" comment)
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_6, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_6, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_6, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape {1,1024,192} -> {1,1024,1,192}: inserts a unit height axis so the
// following pointwise conv (pwconv2) can run as a 2-D convolution.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_0_convnext_3_act_Mul_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0237220413982868f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,2,3,1}) to channel-last {1,1,192,1024} ahead of the pwconv2 Conv2d.
static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0237220413982868f, .offset= -7}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0183921363204718f, .offset= -122}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008797040791251f, .offset= -115}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
// (review note, inside the generated banner comment) addNode__..._pwconv2_Conv_2d builds the Conv2d
// op for the pointwise conv: dilation {1,1}, pad_amount {0,0,0,0}, stride {1,1} — all rank-1/2
// uint32 STATIC param tensors with dataSize = elementCount * 4 bytes (8, 16, 8 respectively).
_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, 
// Next line: end of pad_amount param, the stride param tensor, and two SCALAR params:
// group = 1 (uint32) and reuse_sparse_indices = 0 (bool8). Conv inputs are the NHWC-transposed
// activation plus the static weight and bias tensors added above; output dims {1, 1, 192, 512}.
.offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight", "tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t 
// Next line: the Conv2d output tensor (u8, scale 0.0374307..., offset -114), the addNode call
// ("Conv2d", 5 params, 3 inputs, 1 output), and the start of addNode__..._Conv_intermediate_nchw:
// a Transpose with perm {0, 3, 1, 2} (NHWC back to NCHW).
outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374307185411453f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
// Next line: remainder of the NCHW transpose — perm param (4 x uint32, dataSize 16), input
// "_..._Conv_intermediate", output dims {1, 512, 1, 192} (same quant encoding as its input,
// scale 0.0374307..., offset -114, as expected for a pure layout permutation).
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374307185411453f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
// Next line: the Transpose addNode call, then the complete addNode__..._Conv_intermediate:
// a parameterless "Reshape" collapsing {1, 512, 1, 192} to rank-3 {1, 512, 192}
// (output "_..._Conv_output_0", quantization unchanged).
Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374307185411453f, .offset= -114}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
// Next line: the Reshape addNode call (0 params), then the start of
// addNode__..._Conv_output_0_nfc: a rank-3 Transpose with perm {0, 2, 1}
// (3 x uint32, dataSize 12) moving channels last: {1, 512, 192} -> {1, 192, 512}.
"_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0" 
// Next line: the NFC transpose's output ({1, 192, 512}, quantization unchanged) and its
// addNode call; then the start of addTensor_tts_ttl_..._convnext_3_gamma — a STATIC
// per-channel scale vector, dims {1, 1, 512}, BINVARSTART/BINLEN-backed.
}; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374307185411453f, .offset= -114}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_0_convnext_3_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_0_convnext_3_gamma", .type= 
// Next line: remainder of gamma (u8, scale 0.0027327..., offset 0), then the complete body of
// addNode__..._convnext_3_Mul_2: an ElementWiseBinary node with scalar param operation = 13
// (the enum value the converter emits for the *_Mul* nodes in this file; operation = 0 is used
// for *_Add* — NOTE(review): confirm against the ElementWiseBinary enum in QnnOpDef.h).
// It multiplies gamma ({1,1,512}, broadcast) with the conv output ({1,192,512}).
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027327837888151f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_0_convnext_3_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_0_convnext_3_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_0_convnext_3_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_Mul_2[] = { "tts_ttl_vector_field_main_blocks_0_convnext_3_gamma", "_vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0144739821553230f, .offset= -142}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
// Next line: Mul_2's addNode call (2 inputs, 1 output), then the start of
// addNode__..._convnext_3_Add (ElementWiseBinary, operation = 0) summing
// "_..._Mul_output_0" (defined earlier in the file, outside this chunk) with Mul_2's output.
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_Add */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_Add[] = { "_vector_field_main_blocks_0_convnext_3_Mul_output_0", "_vector_field_main_blocks_0_convnext_3_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0300068929791451f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, 
// Next line: the Add node's addNode call, then the complete addNode__..._convnext_3_Mul_3:
// ElementWiseBinary (operation = 13) multiplying the Add output by graph input "latent_mask"
// ({1,1,192} per HEAD's input_dim list; broadcast against {1,192,512}); output keeps the
// Add output's quantization (scale 0.0300068..., offset -111).
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_0_convnext_3_Mul_3[] = { "_vector_field_main_blocks_0_convnext_3_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0300068929791451f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
// Next line: Mul_3's addNode call, then the start of addNode__..._Mul_3_output_0_ncf:
// a rank-3 Transpose with perm {0, 2, 1} (3 x uint32, dataSize 12), {1,192,512} -> {1,512,192}.
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
// Next line: the NCF transpose's input/output tensors and addNode call, then the start of
// addNode__vector_field_main_blocks_1_Add — the block-1 residual add (ElementWiseBinary,
// operation = 0) of the block-0 output with "_..._blocks_1_Transpose_1_output_0"
// (defined elsewhere in the file).
inputs__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf[] = { "_vector_field_main_blocks_0_convnext_3_Mul_3_output_0" }; uint32_t dimensions__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0300068929791451f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_1_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_1_Add */ Qnn_Param_t params__vector_field_main_blocks_1_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* 
// Next line: blocks_1_Add inputs/output ({1, 512, 192}, scale 0.0293396..., offset -111) and
// its addNode call, then the start of addNode__vector_field_main_blocks_1_Add_output_0_nfc:
// a rank-3 Transpose (perm {0, 2, 1}) back to feature-last {1, 192, 512}.
inputs__vector_field_main_blocks_1_Add[] = { "_vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf", "_vector_field_main_blocks_1_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_1_Add_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_1_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_1_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_1_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_1_Add_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_1_Add_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_1_Add_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_1_Add_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_1_Add_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, 
// Next line: remainder of the NFC transpose (perm is 3 x uint32, dataSize 12; output
// {1, 192, 512}, quantization unchanged) and the start of its addNode call.
.name= "_vector_field_main_blocks_1_Add_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_1_Add_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_1_Add_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_1_Add_output_0_nfc[] = { "_vector_field_main_blocks_1_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_1_Add_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_1_Add_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_1_Add_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_1_Add_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_1_Add_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_1_Add_output_0_nfc, // Node Params 1, // Num Node Params 
// Next line: end of the NFC transpose's addNode call, then the complete
// addNode__vector_field_main_blocks_1_Mul: ElementWiseBinary (operation = 13) applying
// "latent_mask" to the block-1 residual sum; output quantization identical to its input.
inputs__vector_field_main_blocks_1_Add_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_1_Add_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_1_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_1_Mul */ Qnn_Param_t params__vector_field_main_blocks_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_1_Mul[] = { "_vector_field_main_blocks_1_Add_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_1_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_1_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_1_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_1_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_1_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } 
// Registers ElementWiseBinary node "_vector_field_main_blocks_2_convnext_0_Mul": re-applies
// "latent_mask" to the previous block's output before the ConvNeXt sub-block.  operation=13 is the
// same binary-op selector used by the other *_Mul nodes (presumably MULTIPLY; the source ONNX op is
// Mul — confirm against QnnOpDef.h).  Output keeps the input's quant encoding unchanged.
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_Mul */
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_Mul[] = { "_vector_field_main_blocks_1_Mul_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_2_convnext_0_Mul", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_main_blocks_2_convnext_0_Mul, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_2_convnext_0_Mul, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_2_convnext_0_Mul, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// Registers the explicit Pad feeding the depthwise conv (body continues on the following lines).
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Pad */
// pad_amount is rank-2 {3,2}: (before, after) per input axis.  {0,0, 2,2, 0,0} pads only axis 1
// (the sequence axis) by 2 on each side: 192 -> 196, matching the declared output shape below.
// dataSize=24 == 6 * sizeof(uint32_t).
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// scheme=3 selects the padding mode enum — TODO(review): confirm which scheme value 3 maps to in QnnOpDef.h.
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_2_convnext_0_Mul_output_0" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_2_convnext_0_dwconv_Pad", // Node Name
    "qti.aisw", // Package Name
    "Pad", // Qnn Node Type
    params__vector_field_main_blocks_2_convnext_0_dwconv_Pad, // Node Params
    2, // Num Node Params
    inputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// Transpose {1,196,512} -> {1,512,196} (perm {0,2,1}): converts the padded activation from
// feature-last back to channel-first ("ncf") layout ahead of the conv reshape.
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

static ModelError_t
// Reshape {1,512,196} -> {1,512,1,196}: inserts a height-1 axis so the 1-D depthwise conv can run
// as a 2-D conv (parameter-less Reshape; new shape comes from the declared output dims).
addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// Transpose {1,512,1,196} -> {1,1,196,512} (perm {0,2,3,1}): NCHW -> NHWC, the layout the
// DepthWiseConv2d node below expects.  dataSize=16 == 4 * sizeof(uint32_t).
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293396338820457f, .offset= -111}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// Static quantized (uint8) depthwise-conv weight tensor {1,5,1,512}; raw bytes come from the
// companion binary blob via BINVARSTART/BINLEN (definition continues on the following lines).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024711792357266f, .offset= -126}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Static quantized (uint8) depthwise-conv bias tensor {512}; bytes from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010678203543648f, .offset= -155}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// DepthWiseConv2d over the NHWC tensor {1,1,196,512} with the 1x5 weight above: stride 1x1,
// dilation 1x1, no conv padding (padding was done by the explicit Pad node), so the spatial
// width shrinks 196 - 5 + 1 = 192.  Each uint32-pair param tensor has dataSize=8; the rank-2
// pad_amount {2,2} has dataSize=16.
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= 
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0090053686872125f, .offset= -108}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0090053686872125f, .offset= -108}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc */ uint32_t 
dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0090053686872125f, .offset= -108}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// ElementWiseBinary (operation=13, presumably MULTIPLY — confirm in QnnOpDef.h): masks the conv
// output with "latent_mask".  NOTE: the output tensor is named
// "_vector_field_main_blocks_2_convnext_0_norm_Transpose_output_0" because the converter folded
// the ONNX pre-norm Transpose away; the LayerNorm node below consumes it directly.
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_Mul_1[] = { "_vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale=
0.0090053686872125f, .offset= -108}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_2_convnext_0_Mul_1", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_main_blocks_2_convnext_0_Mul_1, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_2_convnext_0_Mul_1, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_2_convnext_0_Mul_1, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// Static quantized (uint8) LayerNorm gamma tensor {512}; bytes from the binary blob
// (definition continues on the following lines).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029825202655047f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016520707868040f, .offset= -152}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= 
/* NOTE(review): auto-generated QNN wrapper code — do not hand-edit emitted names
 * or quantization values; regenerate from the converter. (This fragment
 * continues the LayerNormalization node definition opened on an earlier line:
 * it finishes the "axes" tensor param, adds the scalar "epsilon" param, and
 * wires the node's inputs/output.) */
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_2_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight", "tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0356059037148952f, .offset= -168}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transposes the LayerNorm output (1,192,512) back to channel-first
 * (1,512,192) via perm={0,2,1}; the output reuses the same quant encoding
 * as its input (Transpose does not change values). */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0356059037148952f, .offset= -168}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshapes (1,512,192) to rank-4 (1,512,1,192) so the following pointwise
 * conv (pwconv1) can be executed as a Conv2d. Reshape carries no params and
 * preserves the input's quant encoding. */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0356059037148952f, .offset= -168}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transposes (1,512,1,192) to NHWC (1,1,192,512) via perm={0,2,3,1} ahead of
 * the pwconv1 Conv2d. Values (and quant encoding) are unchanged. */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0356059037148952f, .offset= -168}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers the pwconv1 1x1 conv weights, HWIO layout (1,1,512,1024), static
 * UFIX8 tensor backed by the binary blob. (Definition continues on the next
 * source lines.) */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0204923190176487f, .offset= -105}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
/* NOTE(review): auto-generated QNN wrapper code — do not hand-edit. (This
 * fragment finishes the pwconv1 weight tensor opened on an earlier line.) */
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Registers the pwconv1 bias: 1024-element static UFIX8 tensor from the
 * binary blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014241958269849f, .offset= -210}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Adds the pwconv1 Conv2d node: 1x1 kernel, stride 1, dilation 1, zero
 * padding, group=1, mapping 512 -> 1024 channels on the NHWC (1,1,192,512)
 * input. (Definition continues on the next source lines.) */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
/* NOTE(review): auto-generated QNN wrapper code — do not hand-edit. (This
 * fragment finishes the pwconv1 Conv2d output tensor and node registration
 * opened on an earlier line.) */
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0884725227952003f, .offset= -220}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transposes the Conv2d NHWC output (1,1,192,1024) back to NCHW
 * (1,1024,1,192) via perm={0,3,1,2}; quant encoding unchanged. */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0884725227952003f, .offset= -220}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Drops the dummy spatial dim: Reshape (1,1024,1,192) -> (1,1024,192),
 * producing the rank-3 pwconv1 output tensor. */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0884725227952003f, .offset= -220}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Applies the ConvNeXt block's activation as an ElementWiseNeuron with
 * scalar param operation=1. NOTE(review): which activation op id 1 maps to
 * is defined by QnnOpDef.h for this SDK version — confirm there (the ONNX
 * graph names suggest this fused from an act "Mul_1" pattern). */
static ModelError_t addNode__elementwiseneuron_8(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_8 */
Qnn_Param_t params__elementwiseneuron_8[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_8[] = { "_vector_field_main_blocks_2_convnext_0_pwconv1_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126563729718328f, .offset= -13}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_8", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_8, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_8, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_8, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshapes the activation output (1,1024,192) to rank-4 (1,1024,1,192) for
 * the pwconv2 Conv2d. (Definition continues on the next source lines.) */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err =
/* NOTE(review): auto-generated QNN wrapper code — do not hand-edit. (This
 * fragment finishes the pwconv2 reshape-to-2d node opened on an earlier
 * line.) */
MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_2_convnext_0_act_Mul_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126563729718328f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transposes (1,1024,1,192) to NHWC (1,1,192,1024) via perm={0,2,3,1} ahead
 * of the pwconv2 Conv2d; values and quant encoding unchanged. */
static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0126563729718328f, .offset= -13}}}, .rank= 4,
.dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers the pwconv2 1x1 conv weights, HWIO layout (1,1,1024,512), static
 * UFIX8 tensor from the binary blob. (Definition continues on the next
 * source lines.) */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0235646776854992f, .offset= -112}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012308068107814f, .offset= -106}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t 
// Conv_2d node parameter tensors for pwconv2 (a 1x1 pointwise conv):
//   dilation {1,1}  -> rank-1 tensor of 2 uint32 (dataSize 8)
//   pad_amount {0,0,0,0} -> rank-2 {2,2} uint32 (dataSize 16), i.e. no padding
//   stride {1,1}    -> rank-1 tensor of 2 uint32 (dataSize 8)
// plus scalar params: group=1, reuse_sparse_indices=false.
// The node consumes the NHWC-reshaped activation, static weight, and static bias.
_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
// stride tensor param + scalar params (group, reuse_sparse_indices) below.
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
// Conv_2d output tensor {1,1,192,512} (NHWC, u8 scale 0.03461 offset -139) and
// the addNode call registering the Conv2d op (5 params, 3 inputs, 1 output).
// Then: Transpose node "_..._Conv_intermediate_nchw" with perm {0,3,1,2}
// (NHWC -> NCHW), producing a {1,512,1,192} tensor with the same quant encoding.
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346070304512978f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 
// perm tensor: 4 x uint32 (dataSize 16); output {1,512,1,192} declared below.
0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346070304512978f, .offset= -139}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 
// End of the NCHW Transpose addNode. Next function: a parameterless Reshape
// that squeezes {1,512,1,192} down to {1,512,192} (drops the singleton H dim),
// keeping the same u8 quant encoding (scale 0.03461, offset -139).
1, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346070304512978f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), 
// Next function: Transpose "_..._Conv_output_0_nfc" with perm {0,2,1}
// ({1,512,192} -> {1,192,512}, i.e. channel-last "NFC" layout for the
// element-wise ops that follow). perm is 3 x uint32 (dataSize 12).
err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
// Tail of the _nfc Transpose ({1,192,512} output + addNode). Then:
//   addTensor gamma: static ConvNeXt layer-scale vector {1,1,512} u8,
//     scale 0.00333, offset 0; payload from BINVARSTART/BINLEN.
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0346070304512978f, .offset= -139}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_2_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_2_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033295850735158f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_2_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
// Mul_2 node: ElementWiseBinary with operation=13 (multiply per the converter's
// op enum -- TODO confirm against QNN ElementWiseBinary docs); computes
// gamma * pwconv2_output ({1,1,512} broadcast against {1,192,512}).
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_2_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_2_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_2_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_2_convnext_0_gamma", "_vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0221977457404137f, .offset= -146}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn 
// Node Type (comment continues from previous line).
// Tail of Mul_2's addNode call, then the residual Add node:
//   ElementWiseBinary operation=0 (add -- TODO confirm enum) combining
//   Mul_output_0 (residual path, produced earlier in the file) with
//   Mul_2_output_0; output {1,192,512} u8 scale 0.02991 offset -118.
Node Type params__vector_field_main_blocks_2_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_Add */ Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_2_convnext_0_Add[] = { "_vector_field_main_blocks_2_convnext_0_Mul_output_0", "_vector_field_main_blocks_2_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0299086235463619f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type 
// Mul_3: masks the block output with the "latent_mask" graph input
// (elementwise multiply, operation=13); quant encoding is carried over
// unchanged from Add_output_0 (mask multiply preserves the value range).
params__vector_field_main_blocks_2_convnext_0_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_2_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_2_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_2_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_2_convnext_0_Mul_3[] = { "_vector_field_main_blocks_2_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_2_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_2_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_2_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0299086235463619f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_2_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_2_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_2_convnext_0_Mul_3, // Node Params 
// Tail of Mul_3's addNode, then blocks_3 begins:
//   blocks_3_Mul: masks again with "latent_mask" (operation=13, multiply).
//   NOTE(review): the input was already masked by Mul_3 immediately above;
//   the double mask mirrors the exported ONNX graph and is numerically
//   idempotent for 0/1 masks -- do not "optimize" it away here.
//   Its output is named "_vector_field_main_blocks_3_Transpose_output_0"
//   (converter folded a Transpose into this name).
1, // Num Node Params inputs__vector_field_main_blocks_2_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_2_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_Mul */ Qnn_Param_t params__vector_field_main_blocks_3_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_3_Mul[] = { "_vector_field_main_blocks_2_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_3_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0299086235463619f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_3_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_3_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_3_Mul, // Output Tensors 
// attn.W_query_linear pre-reshape: flattens {1,192,512} to {192,512} so the
// MatMul can run as a rank-2 FullyConnected. Then addTensor for the packed
// QKV(+?) projection weight "onnx__MatMul_3101" {256,512} begins.
1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape[] = { "_vector_field_main_blocks_3_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0299086235463619f, .offset= -118}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3101(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; 
// Static tensors for the query-linear projection:
//   onnx__MatMul_3101: weight {256,512} u8, scale 0.01126, offset -129
//   ..._W_query_linear_bias: {256} u8, scale 0.00445, offset -117
// Both payloads come from the companion binary (BINVARSTART/BINLEN).
uint32_t dimensions_onnx__MatMul_3101[] = {256, 512}; VALIDATE(model.addTensor("onnx__MatMul_3101", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3101", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0112574333325028f, .offset= -129}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3101, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3101), .dataSize=BINLEN(onnx__MatMul_3101)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044484310783446f, .offset= -117}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
// The MatMul itself is lowered to a FullyConnected node:
// {192,512} x weight{256,512} + bias{256} -> {192,256}
// (output u8 scale 0.19635, offset -170; MatMul+Add fused, hence the
// "_Add_output_0_fc" output name).
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_W_query_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_query_linear_MatMul */ const char* inputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul[] = { "_vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape", "onnx__MatMul_3101", "tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_3_attn_W_query_linear_Add_output_0_fc[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_W_query_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
// post_reshape: restores the batch dim, {192,256} -> {1,192,256}, same quant
// encoding as the FC output.
addNode__vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_3_attn_W_query_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_3_attn_W_query_linear_Add_output_0[] = {1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_W_query_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Split(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
// attn_Split: splits {1,192,256} along axis=2 at indices {64,128,192} into
// four {1,192,64} head tensors (split_index is 3 x uint32, dataSize 12).
_vector_field_main_blocks_3_attn_Split */ uint32_t dimensions__vector_field_main_blocks_3_attn_Split_split_index[] = {3}; uint32_t _vector_field_main_blocks_3_attn_Split_split_index[] = {64, 128, 192}; Qnn_Param_t params__vector_field_main_blocks_3_attn_Split[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Split_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Split[] = { "_vector_field_main_blocks_3_attn_W_query_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_output_0[] = {1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_output_1[] = {1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_output_2[] = {1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_output_3[] = {1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Split[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
/* Tail of the "_vector_field_main_blocks_3_attn_Split" node (the enclosing
 * addNode_* function begins above this chunk): fourth output tensor
 * descriptor, then the addNode() registration. All four Split outputs carry
 * the same uint8 encoding (scale 0.19635, offset -170) because Split only
 * partitions data along axis 2 at split points {64, 128, 192}. */
"_vector_field_main_blocks_3_attn_Split_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Split", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Split, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Split, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Split, // Output Tensors
4// Num Output Tensors
), err);
return err;
}
/* ONNX Unsqueeze lowered to a QNN Reshape: inserts a leading axis,
 * {1, 192, 64} -> {1, 1, 192, 64}. Pure layout change; the producer's
 * quantization encoding is carried through unchanged. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze */
const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze[] = { "_vector_field_main_blocks_3_attn_Split_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Unsqueeze", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Unsqueeze, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Unsqueeze, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Same Unsqueeze-as-Reshape lowering, applied to Split output 1. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_1 */
const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_1[] = { "_vector_field_main_blocks_3_attn_Split_output_1" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_1_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Unsqueeze_1", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Unsqueeze_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Unsqueeze_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Same Unsqueeze-as-Reshape lowering, applied to Split output 2. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_2 */
const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_2[] = { "_vector_field_main_blocks_3_attn_Split_output_2" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_2_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Unsqueeze_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Unsqueeze_2", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Unsqueeze_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Unsqueeze_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Same Unsqueeze-as-Reshape lowering, applied to Split output 3. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Unsqueeze_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Unsqueeze_3 */
const char* inputs__vector_field_main_blocks_3_attn_Unsqueeze_3[] = { "_vector_field_main_blocks_3_attn_Split_output_3" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Unsqueeze_3_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Unsqueeze_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Unsqueeze_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Unsqueeze_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Unsqueeze_3", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Unsqueeze_3, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Unsqueeze_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Concat node: stacks the four unsqueezed {1,1,192,64} chunks along a new
 * leading axis (axis parameter defined below) into a heads-first tensor. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Concat(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Concat */
Qnn_Param_t
/* Concat parameter: axis=0 stacks the four {1,1,192,64} inputs into
 * {4,1,192,64}; all inputs and the output share one uint8 encoding. */
params__vector_field_main_blocks_3_attn_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Concat[] = { "_vector_field_main_blocks_3_attn_Unsqueeze_output_0", "_vector_field_main_blocks_3_attn_Unsqueeze_1_output_0", "_vector_field_main_blocks_3_attn_Unsqueeze_2_output_0", "_vector_field_main_blocks_3_attn_Unsqueeze_3_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Concat_output_0[] = {4, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Concat", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Concat, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Concat, // Input Tensor Names
4, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Concat, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* StridedSlice taking the FIRST half of the last axis, [0:32), of the
 * {4,1,192,64} stacked tensor. The static "ranges" tensor holds one
 * (begin, end, stride) triple per axis: {0,4,1, 0,1,1, 0,192,1, 0,32,1}.
 * NOTE(review): together with the [32:64) slice and the Cos/Sin multiplies
 * that follow, this matches a rotate-half rotary positional-embedding
 * pattern — inferred from node names; confirm against the source model. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Slice_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Slice_1 */
uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_1_ranges[] = {4, 3};
int32_t _vector_field_main_blocks_3_attn_Slice_1_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 0, 32, 1};
Qnn_Param_t params__vector_field_main_blocks_3_attn_Slice_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Slice_1_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_1_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Slice_1_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Slice_1[] = { "_vector_field_main_blocks_3_attn_Concat_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_1_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Slice_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
/* Remainder of the Slice_1 output descriptor ({4,1,192,32}); the slice is a
 * pure data selection so the producer's encoding is reused unchanged. */
"_vector_field_main_blocks_3_attn_Slice_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Slice_1", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Slice_1, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Slice_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Slice_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* StridedSlice taking the SECOND half of the last axis, [32:64), via ranges
 * {0,4,1, 0,1,1, 0,192,1, 32,64,1}. Counterpart of the [0:32) slice; with
 * the Cos/Sin multiplies below this matches a rotate-half rotary embedding
 * pattern — inferred from node names; confirm against the source model. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Slice_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Slice_2 */
uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_2_ranges[] = {4, 3};
int32_t _vector_field_main_blocks_3_attn_Slice_2_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 32, 64, 1};
Qnn_Param_t params__vector_field_main_blocks_3_attn_Slice_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Slice_2_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_2_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Slice_2_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Slice_2[] = { "_vector_field_main_blocks_3_attn_Concat_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Slice_2_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Slice_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Slice_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1963463574647903f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Slice_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
/* Registration tail of the Slice_2 StridedSlice node. */
"_vector_field_main_blocks_3_attn_Slice_2", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Slice_2, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Slice_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Slice_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary, operation code 13; this node originated as ONNX Mul
 * (see node name). Multiplies the [0:32) slice by the Cos tensor produced
 * elsewhere in the file. Note each ElementWiseBinary output below carries
 * its own calibrated scale/offset. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Mul_3[] = { "_vector_field_main_blocks_3_attn_Slice_1_output_0", "_vector_field_main_blocks_3_attn_Cos_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_3_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1132430359721184f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ONNX Mul: [32:64) slice times the Sin tensor. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_4 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Mul_4[] = { "_vector_field_main_blocks_3_attn_Slice_2_output_0", "_vector_field_main_blocks_3_attn_Sin_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_4_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Mul_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2307605147361755f, .offset= -142}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Mul_4", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Mul_4, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Mul_4, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Mul_4, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ONNX Mul: [0:32) slice times the Sin tensor. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_5 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_5[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Mul_5[] = { "_vector_field_main_blocks_3_attn_Slice_1_output_0", "_vector_field_main_blocks_3_attn_Sin_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_5_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Mul_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1171025782823563f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Mul_5", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Mul_5, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Mul_5, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Mul_5, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ONNX Mul: [32:64) slice times the Cos tensor. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_6(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_6 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_6[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Mul_6[] = { "_vector_field_main_blocks_3_attn_Slice_2_output_0", "_vector_field_main_blocks_3_attn_Cos_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Mul_6_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Mul_6_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2072266191244125f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Mul_6_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Mul_6", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Mul_6, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Mul_6, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Mul_6, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary, operation code 18; node originated as ONNX Sub:
 * Mul_3 - Mul_4 (i.e. x1*cos - x2*sin, the first rotate-half component —
 * inferred from node names; confirm against the source model). */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Sub(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Sub */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Sub[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Sub[] = { "_vector_field_main_blocks_3_attn_Mul_3_output_0", "_vector_field_main_blocks_3_attn_Mul_4_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Sub_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Sub[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Sub_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2320123016834259f, .offset= -113}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Sub_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Sub", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Sub, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Sub, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Sub, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary, operation code 0; node originated as ONNX Add:
 * Mul_5 + Mul_6 (x1*sin + x2*cos, second rotate-half component). */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Add_1(QnnModel& model){
/* Body of the Add_1 node (function header on the previous original line). */
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Add_1 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Add_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Add_1[] = { "_vector_field_main_blocks_3_attn_Mul_5_output_0", "_vector_field_main_blocks_3_attn_Mul_6_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Add_1_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Add_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Add_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2178386002779007f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Add_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Add_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Add_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Add_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Add_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Concat along axis 3: rejoins the two rotated 32-wide halves (Sub, Add_1)
 * back into a 64-wide tensor, {4,1,192,64}. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Concat_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Concat_3 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Concat_3[] = { "_vector_field_main_blocks_3_attn_Sub_output_0", "_vector_field_main_blocks_3_attn_Add_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Concat_3_output_0[] = {4, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Concat_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2462338656187057f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Concat_3", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Concat_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Concat_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Concat_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* MatMul: {4,1,192,64} x "..._attn_Transpose_output_0" -> {4,1,192,128}.
 * NOTE(review): presumably per-head attention scores against a 128-length
 * key sequence (text path is 128 tokens per the converter inputs) —
 * inferred from shapes/names; confirm against the source model. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_MatMul */
Qnn_Param_t params__vector_field_main_blocks_3_attn_MatMul[] = {
/* Neither MatMul operand is transposed (bool8 = 0 for both). */
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_3_attn_MatMul[] = { "_vector_field_main_blocks_3_attn_Concat_3_output_0", "_vector_field_main_blocks_3_attn_Transpose_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_MatMul_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 5.3964037895202637f, .offset= -146}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_MatMul", // Node Name
"qti.aisw", // Package Name
"MatMul", // Qnn Node Type
params__vector_field_main_blocks_3_attn_MatMul, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_3_attn_MatMul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static scalar constant; its uint8 payload lives in the external weights
 * blob (BINVARSTART/BINLEN). With scale 0.0627451 and offset 0 the encoding
 * covers [0, 16]. NOTE(review): used as the divisor of the MatMul output
 * below — presumably the attention 1/sqrt(d) scaling factor; confirm the
 * actual value in the binary blob. */
static ModelError_t addTensor__vector_field_main_blocks_3_attn_Constant_39_output_0(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__vector_field_main_blocks_3_attn_Constant_39_output_0[] = {1};
VALIDATE(model.addTensor("_vector_field_main_blocks_3_attn_Constant_39_output_0", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Constant_39_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0627451017498970f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Constant_39_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_3_attn_Constant_39_output_0), .dataSize=BINLEN(_vector_field_main_blocks_3_attn_Constant_39_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* ElementWiseBinary, operation code 2; node originated as ONNX Div:
 * divides the MatMul scores by the scalar Constant_39. */
static ModelError_t addNode__vector_field_main_blocks_3_attn_Div_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_3_attn_Div_4 */
Qnn_Param_t params__vector_field_main_blocks_3_attn_Div_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__vector_field_main_blocks_3_attn_Div_4[] = { "_vector_field_main_blocks_3_attn_MatMul_output_0", "_vector_field_main_blocks_3_attn_Constant_39_output_0" };
uint32_t dimensions__vector_field_main_blocks_3_attn_Div_4_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Div_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Div_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3372752368450165f, .offset= -146}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Div_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_3_attn_Div_4", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_3_attn_Div_4, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_3_attn_Div_4, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_3_attn_Div_4, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static scalar constant (definition continues past this chunk). Its
 * encoding is extreme: scale ~= 1.334e36 with offset -255, so q in [0,255]
 * dequantizes to roughly [-3.40e38, 0] (about -FLT_MAX up to 0).
 * NOTE(review): consistent with a large negative attention-mask fill
 * value — inferred from the encoding range; confirm how this constant is
 * consumed downstream. */
static ModelError_t addTensor__vector_field_main_blocks_3_attn_Constant_42_output_0(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__vector_field_main_blocks_3_attn_Constant_42_output_0[] = {1};
VALIDATE(model.addTensor("_vector_field_main_blocks_3_attn_Constant_42_output_0", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Constant_42_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 1334440575053054352202761503860850688.0000000000000000f, .offset= -255}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Constant_42_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_3_attn_Constant_42_output_0), .dataSize=BINLEN(_vector_field_main_blocks_3_attn_Constant_42_output_0)}}, .isDynamicDimensions=
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Where */ const char* inputs__vector_field_main_blocks_3_attn_Where[] = { "_vector_field_main_blocks_21_attn_Cast_2_output_0", "_vector_field_main_blocks_3_attn_Constant_42_output_0", "_vector_field_main_blocks_3_attn_Div_4_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Where_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 1334440575053054352202761503860850688.0000000000000000f, .offset= -255}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_3_attn_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Softmax */ Qnn_Param_t params__vector_field_main_blocks_3_attn_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Softmax[] = { "_vector_field_main_blocks_3_attn_Where_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Softmax_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__vector_field_main_blocks_3_attn_Softmax, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_3_attn_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_3_attn_Where_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Where_1 */ const char* inputs__vector_field_main_blocks_3_attn_Where_1[] = { "_vector_field_main_blocks_23_attention_Cast_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0", "_vector_field_main_blocks_3_attn_Softmax_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Where_1_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Where_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Where_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Where_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Where_1", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_Where_1, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Where_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_MatMul_1 */ Qnn_Param_t params__vector_field_main_blocks_3_attn_MatMul_1[] = { 
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_3_attn_MatMul_1[] = { "_vector_field_main_blocks_3_attn_Where_1_output_0", "_vector_field_main_blocks_3_attn_Concat_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_MatMul_1_output_0[] = {4, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1748898625373840f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_3_attn_MatMul_1, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_3_attn_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Split_3 */ 
uint32_t dimensions__vector_field_main_blocks_3_attn_Split_3_split_index[] = {3}; uint32_t _vector_field_main_blocks_3_attn_Split_3_split_index[] = {1, 2, 3}; Qnn_Param_t params__vector_field_main_blocks_3_attn_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_attn_Split_3_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Split_3[] = { "_vector_field_main_blocks_3_attn_MatMul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_3_output_0[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_3_output_1[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_3_output_2[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_3_attn_Split_3_output_3[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1748898625373840f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1748898625373840f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Split_3_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1748898625373840f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_3_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_3_attn_Split_3_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1748898625373840f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Split_3_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Split_3", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_3_attn_Split_3, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_3_attn_Split_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Split_3, // Output Tensors 4// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Concat_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Concat_5 */ Qnn_Param_t params__vector_field_main_blocks_3_attn_Concat_5[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Concat_5[] = { "_vector_field_main_blocks_3_attn_Split_3_output_0", "_vector_field_main_blocks_3_attn_Split_3_output_1", "_vector_field_main_blocks_3_attn_Split_3_output_2", "_vector_field_main_blocks_3_attn_Split_3_output_3" }; uint32_t dimensions__vector_field_main_blocks_3_attn_Concat_5_output_0[] = {1, 1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Concat_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, 
{.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_Concat_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1748898625373840f, .offset= -161}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_3_attn_Concat_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Concat_5", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_3_attn_Concat_5, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_3_attn_Concat_5, // Input Tensor Names 4, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Concat_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Squeeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Squeeze */ const char* inputs__vector_field_main_blocks_3_attn_Squeeze[] = { "_vector_field_main_blocks_3_attn_Concat_5_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_pre_reshape[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= 
{.scale= 0.1748898625373840f, .offset= -161}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3110(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3110[] = {512, 256}; VALIDATE(model.addTensor("onnx__MatMul_3110", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3110", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103174420073628f, .offset= -132}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3110, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3110), .dataSize=BINLEN(onnx__MatMul_3110)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias[] = 
{512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022998047061265f, .offset= -83}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_out_fc_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_out_fc_linear_MatMul */ const char* inputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul[] = { "_vector_field_main_blocks_3_attn_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3110", "tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0_fc[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.5822843313217163f, .offset= -133}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_out_fc_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.5822843313217163f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= 
{ .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_attn_Mul_14(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_attn_Mul_14 */ Qnn_Param_t params__vector_field_main_blocks_3_attn_Mul_14[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_3_attn_Mul_14[] = { "_vector_field_main_blocks_3_attn_out_fc_linear_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_3_Transpose_4_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_attn_Mul_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_Transpose_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.5822843313217163f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_Transpose_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_attn_Mul_14", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_3_attn_Mul_14, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_3_attn_Mul_14, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_3_attn_Mul_14, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_Add */ Qnn_Param_t params__vector_field_main_blocks_3_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_3_Add[] = { "_vector_field_main_blocks_3_Transpose_4_output_0", "_vector_field_main_blocks_3_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_3_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.5871314406394958f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_3_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_3_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_3_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_3_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_3_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_3_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011240961030126f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_3_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_3_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_3_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_3_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_3_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, 
.name= "tts_ttl_vector_field_main_blocks_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005925672594458f, .offset= -128}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_3_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_3_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_3_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_3_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_3_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_3_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_3_norm_norm_weight", "tts_ttl_vector_field_main_blocks_3_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_3_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141162518411875f, .offset= -190}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_3_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_3_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_3_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
// NOTE(review): auto-generated. Two ElementWiseBinary nodes that multiply the
// [1,192,512] activation by "latent_mask" (model input, broadcast over features).
// operation=13 is the converter's lowering of ONNX Mul — presumably
// QNN_OP_ELEMENT_WISE_BINARY_OPERATION_MULTIPLY; verify against QnnOpDef.h.
// Output quant params (scale/offset) are copied from the input, consistent with
// a 0/1 mask multiply.
addNode__vector_field_main_blocks_3_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_3_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_3_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_3_Mul_1[] = { "_vector_field_main_blocks_3_norm_Transpose_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_3_Mul_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_3_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_3_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141162518411875f, .offset= -190}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_3_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_3_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_3_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_3_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_3_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Same mask multiply applied at the entry of main_blocks[4].convnext[0]. */ static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_Mul */ Qnn_Param_t
params__vector_field_main_blocks_4_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_Mul[] = { "_vector_field_main_blocks_3_Mul_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141162518411875f, .offset= -190}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Next: explicit Pad before the depthwise conv (SAME padding unrolled). */ static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
// NOTE(review): auto-generated. Pad node: pad_amount is a 3x2 table (before/after
// per axis) = {0,0, 2,2, 0,0}, i.e. pad the sequence axis by 2 on each side,
// growing [1,192,512] -> [1,196,512] (matches the output dims below).
// scheme=3 selects the pad scheme enum value emitted by the converter —
// presumably constant-zero padding; verify against QnnOpDef.h.
uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_4_convnext_0_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141162518411875f, .offset= -190}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Next: Transpose NFC->NCF (perm {0,2,1}) so the 1-D conv can be expressed as a 2-D depthwise conv. */ static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= {
// NOTE(review): auto-generated. Finishes the NFC->NCF Transpose
// ([1,196,512] -> [1,512,196]) and then a param-less Reshape that inserts a
// height-1 axis ([1,512,196] -> [1,512,1,196]) so the converter can lower the
// ONNX 1-D depthwise Conv to a QNN 2-D DepthWiseConv2d.
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141162518411875f, .offset= -190}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141162518411875f, .offset= -190}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t
// NOTE(review): auto-generated. Transpose NCHW->NHWC (perm {0,2,3,1}),
// [1,512,1,196] -> [1,1,196,512], producing the layout QNN's DepthWiseConv2d
// expects; then registers the static depthwise weight tensor. Weight dims
// {1, 5, 1, 512} = height 1, kernel width 5, depth multiplier 1, 512 channels.
dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0141162518411875f, .offset= -190}}}, .rank= 4,
.dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Static u8 depthwise kernel weights, backed by the model binary blob (BINVARSTART/BINLEN). */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0048366063274443f, .offset= -134}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight)}},
// NOTE(review): auto-generated. Closes the dwconv weight tensor, registers the
// 512-element u8 bias tensor, and begins the DepthWiseConv2d node's tensor
// params: dilation {1,1} and pad_amount {0,0,0,0} (padding was already applied
// by the explicit Pad node upstream).
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005487372982316f, .offset= -173}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t
dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// NOTE(review): auto-generated. Finishes the DepthWiseConv2d node: stride {1,1},
// inputs = padded NHWC activation [1,1,196,512] + static weight/bias; output
// [1,1,192,512] (valid conv: 196 - 5 + 1 = 192), re-quantized to u8 with scale
// 0.0029693872202188, offset -118. Then begins the NHWC->NCHW transpose back.
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight", "tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029693872202188f, .offset= -118}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Next: Transpose NHWC->NCHW (perm {0,3,1,2}) to undo the conv layout change. */ static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements=
// NOTE(review): auto-generated. Finishes the NHWC->NCHW transpose
// ([1,1,192,512] -> [1,512,1,192]) and then a param-less Reshape that drops the
// height-1 axis ([1,512,1,192] -> [1,512,192]), restoring the 1-D conv result.
0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029693872202188f, .offset= -118}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029693872202188f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
// NOTE(review): auto-generated. Transpose NCF->NFC (perm {0,2,1}),
// [1,512,192] -> [1,192,512], then another ElementWiseBinary multiply by
// "latent_mask" (operation=13, the converter's Mul lowering — verify against
// QnnOpDef.h). Note the Mul output tensor is named "_..._norm_Transpose_output_0":
// the converter folded the following norm's Transpose into this edge.
uint32_t _vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029693872202188f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_Mul_1[] = { "_vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029693872202188f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0031271600164473f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
// [review] Continuation of the declaration begun on the previous line: static
// tensor consumed by the LayerNorm node below (bias input): u8, shape {512},
// scale 0.0026609261985868, offset -116, payload via BINVARSTART/BINLEN.
addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias", // Tensor Name
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0026609261985868f, .offset= -116}}},
            .rank= 1,
            .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias)}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
    ), err);
return err;
}

// [review] LayerNorm node: normalizes over axis 2 (the 512-wide dim of
// {1,192,512}), epsilon 1e-6, using the static weight/bias tensors added above.
// Output: u8 "..._norm_Transpose_1_output_0", scale 0.0322511345148087,
// offset -178.
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization_axes",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization_axes,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization[] = {
    "_vector_field_main_blocks_4_convnext_0_norm_Transpose_output_0",
    "tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight",
    "tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0322511345148087f, .offset= -178}}},
            .rank= 3,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization", // Node Name
    "qti.aisw", // Package Name
    "LayerNorm", // Qnn Node Type
    params__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization, // Node Params
    2, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] Transpose with perm {0,2,1}: {1,192,512} -> {1,512,192}, returning
// the LayerNorm output to channel-first ("ncf") layout for the pointwise conv.
// Quantization parameters pass through unchanged (same scale/offset).
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
            .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    "_vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0322511345148087f, .offset= -178}}},
            .rank= 3,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] Start of the pwconv1 lowering: the converter expresses the pointwise
// conv as Reshape (rank 3 -> 4) + NHWC transpose + Conv2d; the body continues
// on the following lines.
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR
_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d */
// [review] Reshape {1,512,192} -> {1,512,1,192}: inserts a unit axis so the
// 1-D pointwise conv can run as Conv2d. Quant params pass through unchanged.
const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
    "_vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0322511345148087f, .offset= -178}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] Transpose with perm {0,2,3,1}: {1,512,1,192} -> {1,1,192,512},
// the channel-last layout the Conv2d node consumes.
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0322511345148087f, .offset= -178}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] Static pwconv1 weight: u8, shape {1,1,512,1024} (512 in -> 1024 out
// channels), scale 0.0097554940730333, offset -117; payload via
// BINVARSTART/BINLEN. Definition continues on the following line.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight", // Tensor Name
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0097554940730333f, .offset= -117}}},
            .rank= 4,
            .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight),
// [review] Tail of the pwconv1 weight tensor registration begun above.
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight)}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
    ), err);
return err;
}

// [review] Static pwconv1 bias: u8, shape {1024}, scale 0.0012894719839096,
// offset -224; payload via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias", // Tensor Name
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0012894719839096f, .offset= -224}}},
            .rank= 1,
            .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias)}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
    ), err);
return err;
}

// [review] pwconv1 as Conv2d: stride {1,1}, pad {0,0,0,0}, dilation {1,1},
// group 1; consumes the NHWC tensor {1,1,192,512} plus the static
// weight/bias, producing "..._Conv_intermediate" {1,1,192,1024}. The output
// tensor initializer continues on the following line.
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_dilation",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_dilation,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_pad_amount",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 2,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_pad_amount,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_stride",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_stride,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d[] = {
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight",
    "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
// [review] Tail of the pwconv1 Conv2d registration begun above: output u8
// {1,1,192,1024}, scale 0.0389241278171539, offset -157.
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0389241278171539f, .offset= -157}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "Conv2d", // Qnn Node Type
    params__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d, // Node Params
    5, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] Transpose with perm {0,3,1,2}: {1,1,192,1024} -> {1,1024,1,192},
// returning the Conv2d result to channel-first layout.
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0389241278171539f, .offset= -157}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] Reshape {1,1024,1,192} -> {1,1024,192}: drops the unit axis added
// for the 2-D lowering, producing "..._pwconv1_Conv_output_0".
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate[] = {
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0389241278171539f, .offset= -157}}},
            .rank= 3,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
    ),
// [review] Closing fragment of the Reshape node registration begun above.
err); return err;
}

// [review] ElementWiseNeuron on the pwconv1 output -- the ConvNeXt activation
// (original ONNX tensor name "act_Mul_1_output_0"). The scalar "operation"
// code is 1; its exact neuron type should be confirmed against QnnOpDef.h.
// Output: u8 {1,1024,192}, scale 0.0156368203461170, offset -11.
static ModelError_t addNode__elementwiseneuron_10(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_10 */
Qnn_Param_t params__elementwiseneuron_10[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
const char* inputs__elementwiseneuron_10[] = {
    "_vector_field_main_blocks_4_convnext_0_pwconv1_Conv_output_0"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_10[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_act_Mul_1_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0156368203461170f, .offset= -11}}},
            .rank= 3,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_act_Mul_1_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_elementwiseneuron_10", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseNeuron", // Qnn Node Type
    params__elementwiseneuron_10, // Node Params
    1, // Num Node Params
    inputs__elementwiseneuron_10, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__elementwiseneuron_10, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] pwconv2 lowering, mirroring pwconv1: Reshape {1,1024,192} ->
// {1,1024,1,192} (unit axis added), then NHWC transpose, then Conv2d.
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
    "_vector_field_main_blocks_4_convnext_0_act_Mul_1_output_0"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0156368203461170f, .offset= -11}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
return err;
}

// [review] Transpose with perm {0,2,3,1}: {1,1024,1,192} -> {1,1,192,1024},
// channel-last layout for the pwconv2 Conv2d. Initializer continues on the
// following line.
static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
            .id=0,
            .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                {.scaleOffsetEncoding= {.scale= 0.0156368203461170f, .offset= -11}}},
            .rank= 4,
            .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc,
            .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0136276241391897f, .offset= -148}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
// addTensor_..._pwconv2_bias: static u8 bias tensor {512} (payload from the binary blob).
// Then addNode_..._Conv_2d begins: a 1x1 pointwise Conv2d with dilation {1,1},
// pad_amount {0,0,0,0} and stride {1,1} (each stored as a static uint32 param tensor).
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009033681708388f, .offset= -143}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t
// Conv2d tensor params: dilation ({1,1}, dataSize 8), pad_amount ({0,0,0,0} as rank-2 {2,2},
// dataSize 16) and stride ({1,1}, dataSize 8), all QNN_DATATYPE_UINT_32 static tensors.
_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version=
// Remaining Conv2d params: scalar group=1 and reuse_sparse_indices=false. Inputs are the NHWC
// activation, the static weight and bias; the output "_..._Conv_intermediate" is u8 {1, 1, 192, 512}.
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0280251894146204f, .offset= -159}}}, .rank= 4,
// Registers the Conv2d node (5 params, 3 inputs, 1 output). Then addNode_..._Conv_intermediate_nchw
// begins: a Transpose back to NCHW with perm {0, 3, 1, 2}.
.dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType=
// Transpose output "_..._Conv_intermediate_nchw" is u8 {1, 512, 1, 192}; it reuses the input's
// scale/offset (layout-only op, quantization unchanged).
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0280251894146204f, .offset= -159}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors
// Finishes the _nchw Transpose registration; then addNode_..._Conv_intermediate: a param-less
// Reshape that drops the singleton H axis, {1, 512, 1, 192} -> {1, 512, 192}.
1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0280251894146204f, .offset= -159}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel&
// addNode_..._Conv_output_0_nfc: Transpose {1, 512, 192} -> {1, 192, 512} with perm {0, 2, 1}
// (3 x uint32 => dataSize 12), moving channels last ahead of the following elementwise ops.
model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
// _nfc Transpose output tensor (u8, same encoding as input). Then addTensor_..._gamma: a static u8
// vector {1, 1, 512} — presumably the ConvNeXt layer-scale gamma (name-based; payload from blob).
{.scaleOffsetEncoding= {.scale= 0.0280251894146204f, .offset= -159}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_4_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_4_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012498018331826f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_4_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_4_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_4_convnext_0_gamma)}},
// Finishes the gamma tensor; then addNode_..._Mul_2: ElementWiseBinary with operation=13
// (multiply, per the node's "Mul" name) of gamma * pwconv2 output -> u8 {1, 192, 512}.
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_4_convnext_0_gamma", "_vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0067809633910656f, .offset= -210}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_Mul_2, // Node Params 1, // Num Node Params
// addNode_..._Add: ElementWiseBinary operation=0 (add, per node name) — residual add of
// _..._Mul_output_0 (defined earlier in the file) and _..._Mul_2_output_0 -> u8 {1, 192, 512}.
inputs__vector_field_main_blocks_4_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_Add */ Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_Add[] = { "_vector_field_main_blocks_4_convnext_0_Mul_output_0", "_vector_field_main_blocks_4_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0196198672056198f, .offset= -208}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_Add, // Node Params 1, // Num Node Params
// addNode_..._Mul_3: ElementWiseBinary operation=13 (multiply) applying the graph input
// "latent_mask" to the block output; mask-multiply keeps the input's scale/offset encoding.
inputs__vector_field_main_blocks_4_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_4_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_4_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_4_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_4_convnext_0_Mul_3[] = { "_vector_field_main_blocks_4_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_4_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_4_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_4_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0196198672056198f, .offset= -208}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_4_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_4_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_4_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_4_convnext_0_Mul_3, // Input
Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_4_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_Mul */ Qnn_Param_t params__vector_field_main_blocks_5_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_5_Mul[] = { "_vector_field_main_blocks_4_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_5_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0196198672056198f, .offset= -208}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_5_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_5_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t
addNode__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape[] = { "_vector_field_main_blocks_5_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0196198672056198f, .offset= -208}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3116(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t 
dimensions_onnx__MatMul_3116[] = {256, 512}; VALIDATE(model.addTensor("onnx__MatMul_3116", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3116", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143406251445413f, .offset= -113}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3116, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3116), .dataSize=BINLEN(onnx__MatMul_3116)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043993946164846f, .offset= -138}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_W_query_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_W_query_linear_MatMul */ const char* inputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul[] = { "_vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape", "onnx__MatMul_3116", "tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_5_attention_W_query_linear_Add_output_0_fc[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0367603041231632f, .offset= -134}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_5_attention_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_W_query_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul, // Output Tensors 1// Num Output 
Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_5_attention_W_query_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_5_attention_W_query_linear_Add_output_0[] = {1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0367603041231632f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_attention_W_query_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* NOTE(review): generated code — head-splitting stage of the attention block.
 * Split: cuts {1, 192, 256} at split_index=128 along axis 2 into two {1, 192, 128} halves
 * (split_index is passed as a rank-1 static uint32 tensor param; axis as a scalar param).
 * Unsqueeze / Unsqueeze_1: implemented as Reshape nodes, lifting each {1, 192, 128} half to
 * rank-4 {1, 1, 192, 128} so the following Concat can stack them on axis 0 — this looks like
 * a 2-head layout {heads=2, 1, seq=192, head_dim=128}; confirm against the source model.
 * All tensors reuse the producer's uint8 encoding (scale 0.0367603041231632, offset -134),
 * consistent with Split/Reshape being pure data-movement ops. */
addNode__vector_field_main_blocks_5_attention_Split(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Split */ uint32_t dimensions__vector_field_main_blocks_5_attention_Split_split_index[] = {1}; uint32_t _vector_field_main_blocks_5_attention_Split_split_index[] = {128}; Qnn_Param_t params__vector_field_main_blocks_5_attention_Split[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_5_attention_Split_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_5_attention_Split[] = { "_vector_field_main_blocks_5_attention_W_query_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Split_output_0[] = {1, 192, 128}; uint32_t dimensions__vector_field_main_blocks_5_attention_Split_output_1[] = {1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Split[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0367603041231632f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0367603041231632f, .offset= -134}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Split", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_5_attention_Split, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_5_attention_Split, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Split, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Unsqueeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Unsqueeze */ const char* inputs__vector_field_main_blocks_5_attention_Unsqueeze[] = {
"_vector_field_main_blocks_5_attention_Split_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Unsqueeze_output_0[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Unsqueeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0367603041231632f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Unsqueeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_Unsqueeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Unsqueeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Unsqueeze_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Unsqueeze_1 */ const char* inputs__vector_field_main_blocks_5_attention_Unsqueeze_1[] = { "_vector_field_main_blocks_5_attention_Split_output_1" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Unsqueeze_1_output_0[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Unsqueeze_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
.id=0, .name= "_vector_field_main_blocks_5_attention_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0367603041231632f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Unsqueeze_1", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_Unsqueeze_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Unsqueeze_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Concat(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Concat */ Qnn_Param_t params__vector_field_main_blocks_5_attention_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_5_attention_Concat[] = { "_vector_field_main_blocks_5_attention_Unsqueeze_output_0", "_vector_field_main_blocks_5_attention_Unsqueeze_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Concat_output_0[] = {2, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
/* NOTE(review): generated code — score computation stage.
 * Concat stacks the two {1, 1, 192, 128} head tensors on axis 0 -> {2, 1, 192, 128}.
 * addTensor_..._tanh_Tanh_output_0_nchw registers a STATIC (baked-in, BINVARSTART/BINLEN
 * from the weight blob) uint8 tensor {2, 1, 128, 50} with scale 0.0078330561518669,
 * offset -128 — judging by the name, a precomputed tanh of style-key features; confirm
 * against the source model (style_ttl has 50 tokens per the converter header).
 * MatMul (transpose_in0=0, transpose_in1=0): {2,1,192,128} x {2,1,128,50} -> {2,1,192,50}
 * raw attention scores, uint8 scale 0.9133611321449280, offset -121. */
"_vector_field_main_blocks_5_attention_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0367603041231632f, .offset= -134}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_5_attention_Concat, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_5_attention_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Concat, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw[] = {2, 1, 128, 50}; VALIDATE(model.addTensor("_vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078330561518669f, .offset= -128}}}, .rank= 4,
.dimensions=dimensions__vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw), .dataSize=BINLEN(_vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_MatMul */ Qnn_Param_t params__vector_field_main_blocks_5_attention_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_5_attention_MatMul[] = { "_vector_field_main_blocks_5_attention_Concat_output_0", "_vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw" }; uint32_t dimensions__vector_field_main_blocks_5_attention_MatMul_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.9133611321449280f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
/* NOTE(review): generated code — score normalization and masking stage.
 * Div: ElementWiseBinary with operation=2 (converter's divide enum) scales the raw scores
 * {2,1,192,50} by the shared constant "_vector_field_main_blocks_3_attn_Constant_39_output_0"
 * — presumably the sqrt(head_dim) attention divisor, reused from block 3; confirm.
 * Softmax: axis=3 (over the 50 style positions), beta=1.0; output uses the fixed encoding
 * scale 1/256 (0.00390625), offset 0, the natural range for softmax probabilities in uint8.
 * Where: ElementWiseSelect(condition, then, else) with the shared boolean mask
 * "_vector_field_main_blocks_23_attention_Cast_output_0" selecting between the constant
 * "_vector_field_main_blocks_3_attn_Constant_44_output_0" and the softmax output —
 * i.e. masked positions are overwritten with the constant (presumably zero). */
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_5_attention_MatMul, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_5_attention_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Div(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Div */ Qnn_Param_t params__vector_field_main_blocks_5_attention_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_5_attention_Div[] = { "_vector_field_main_blocks_5_attention_MatMul_output_0", "_vector_field_main_blocks_3_attn_Constant_39_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Div_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0570850707590580f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_5_attention_Div, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_5_attention_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Softmax */ Qnn_Param_t params__vector_field_main_blocks_5_attention_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__vector_field_main_blocks_5_attention_Softmax[] = { "_vector_field_main_blocks_5_attention_Div_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Softmax_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__vector_field_main_blocks_5_attention_Softmax, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_5_attention_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Where */ const char* inputs__vector_field_main_blocks_5_attention_Where[] = { "_vector_field_main_blocks_23_attention_Cast_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0", "_vector_field_main_blocks_5_attention_Softmax_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Where_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034749801270664f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
/* NOTE(review): generated code — value aggregation and head-merging stage.
 * MatMul_1 (transpose_in0=0, transpose_in1=0): masked attention weights {2,1,192,50} x
 * value tensor "_vector_field_main_blocks_5_attention_Concat_2_output_0" (produced
 * elsewhere in the file; shape must be {2,1,50,128} for the output to be {2,1,192,128}).
 * Split_3: split_index=1 on axis 0 separates the two heads -> 2x {1,1,192,128};
 * Concat_3: axis=3 rejoins them feature-wise -> {1,1,192,256}. Together this is the
 * inverse of the earlier Split/Unsqueeze/Concat head packing. All three nodes reuse the
 * MatMul_1 output encoding (scale 0.0021717627532780, offset -100). */
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_MatMul_1 */ Qnn_Param_t params__vector_field_main_blocks_5_attention_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_5_attention_MatMul_1[] = { "_vector_field_main_blocks_5_attention_Where_output_0", "_vector_field_main_blocks_5_attention_Concat_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_MatMul_1_output_0[] = {2, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021717627532780f, .offset= -100}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_5_attention_MatMul_1, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_5_attention_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Split_3 */ uint32_t dimensions__vector_field_main_blocks_5_attention_Split_3_split_index[] = {1}; uint32_t _vector_field_main_blocks_5_attention_Split_3_split_index[] = {1}; Qnn_Param_t params__vector_field_main_blocks_5_attention_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_5_attention_Split_3_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_5_attention_Split_3[] = { "_vector_field_main_blocks_5_attention_MatMul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Split_3_output_0[] = {1, 1, 192, 128}; uint32_t dimensions__vector_field_main_blocks_5_attention_Split_3_output_1[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021717627532780f, .offset= -100}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021717627532780f, .offset= -100}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Split_3", // Node Name "qti.aisw", //
Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_5_attention_Split_3, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_5_attention_Split_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Split_3, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Concat_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Concat_3 */ Qnn_Param_t params__vector_field_main_blocks_5_attention_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_5_attention_Concat_3[] = { "_vector_field_main_blocks_5_attention_Split_3_output_0", "_vector_field_main_blocks_5_attention_Split_3_output_1" }; uint32_t dimensions__vector_field_main_blocks_5_attention_Concat_3_output_0[] = {1, 1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Concat_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021717627532780f, .offset= -100}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_5_attention_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Concat_3", // Node Name "qti.aisw", // Package
/* NOTE(review): generated code — output-projection operands.
 * Squeeze: a Reshape flattening {1,1,192,256} to the rank-2 {192, 256} expected by the
 * FullyConnected output projection, reusing the producer's encoding.
 * onnx__MatMul_3119: STATIC uint8 weight {512, 256} (QNN FullyConnected weight layout is
 * [out_features, in_features]), scale 0.0084575563669205, offset -146, data from the
 * weight blob via BINVARSTART/BINLEN.
 * ..._out_fc_linear_bias: STATIC uint8 bias {512}, scale 0.0005490796756931, offset -130
 * (8-bit bias per the converter's bias_bitwidth=8 setting in the file header). */
Name "Concat", // Qnn Node Type params__vector_field_main_blocks_5_attention_Concat_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_5_attention_Concat_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_Concat_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Squeeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Squeeze */ const char* inputs__vector_field_main_blocks_5_attention_Squeeze[] = { "_vector_field_main_blocks_5_attention_Concat_3_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_pre_reshape[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021717627532780f, .offset= -100}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names
outputs__vector_field_main_blocks_5_attention_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3119(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3119[] = {512, 256}; VALIDATE(model.addTensor("onnx__MatMul_3119", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3119", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0084575563669205f, .offset= -146}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3119, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3119), .dataSize=BINLEN(onnx__MatMul_3119)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005490796756931f, .offset= -130}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
/* NOTE(review): generated code — output projection and masking stage.
 * out_fc_linear_MatMul: FullyConnected(input {192,256}, weight onnx__MatMul_3119 {512,256},
 * bias {512}) -> {192, 512}, uint8 scale 0.0038622221909463, offset -118.
 * ..._post_reshape: restores the rank-3 activation {1, 192, 512}, same encoding.
 * Mul: ElementWiseBinary with operation=13 (converter's multiply enum) applies the graph
 * input "latent_mask" ({1,1,192} per the header; presumably broadcast over the 512
 * features) to zero out padded latent positions. The output is named
 * "_vector_field_main_blocks_5_Transpose_1_output_0" because the converter folded the
 * original Transpose into this producer — the name is historical, not a transpose here. */
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_out_fc_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_out_fc_linear_MatMul */ const char* inputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul[] = { "_vector_field_main_blocks_5_attention_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3119", "tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0_fc[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038622221909463f, .offset= -118}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_out_fc_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params
0, // Num Node Params inputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038622221909463f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape, // Input Tensor
Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_attention_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_attention_Mul */ Qnn_Param_t params__vector_field_main_blocks_5_attention_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_5_attention_Mul[] = { "_vector_field_main_blocks_5_attention_out_fc_linear_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_5_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_attention_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038622221909463f, .offset= -118}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_attention_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_5_attention_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_5_attention_Mul, // Input Tensor Names 2, // Num Input Tensor Names
outputs__vector_field_main_blocks_5_attention_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_5_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_Add */ Qnn_Param_t params__vector_field_main_blocks_5_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_5_Add[] = { "_vector_field_main_blocks_5_Transpose_1_output_0", "_vector_field_main_blocks_5_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_5_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199255757033825f, .offset= -209}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_5_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_5_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addTensor_tts_ttl_vector_field_main_blocks_5_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_5_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_5_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_5_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016021378105506f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_5_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_5_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_5_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* Static LayerNorm beta (bias) tensor: rank-1 {512}, uint8, data from the model binary blob. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_5_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_5_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_5_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_5_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017054076306522f, .offset= -197}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_5_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_5_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_5_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* LayerNorm node: axes={2} normalizes over the last (512-channel) dimension with epsilon=1e-6; inputs are the activation plus the two static gamma/beta tensors added above. */ static ModelError_t addNode__vector_field_main_blocks_5_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_5_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_5_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_5_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_5_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_5_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_5_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_5_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_5_norm_norm_weight",
"tts_ttl_vector_field_main_blocks_5_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_5_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225985031574965f, .offset= -179}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_5_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_5_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_5_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Re-applies "latent_mask" after LayerNorm (ElementWiseBinary, operation=13); output quant encoding matches the LayerNorm output. */ static ModelError_t addNode__vector_field_main_blocks_5_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_5_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_5_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_5_Mul_1[] = { "_vector_field_main_blocks_5_norm_Transpose_1_output_0", "latent_mask"
}; uint32_t dimensions__vector_field_main_blocks_5_Mul_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_5_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_5_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225985031574965f, .offset= -179}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_5_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_5_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_5_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_5_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_5_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Block-6 ConvNeXt entry: masks the block-5 output with "latent_mask" once more (ElementWiseBinary, operation=13). */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_Mul */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_Mul[] = { "_vector_field_main_blocks_5_Mul_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_Mul[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225985031574965f, .offset= -179}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Pad node: pad_amount rows {0,0},{2,2},{0,0} grow the length-192 axis by 2 on each side (192 -> 196) ahead of the 5-tap depthwise conv; scheme=3 is an enum from QnnOpDef.h (presumably constant/zero padding — TODO confirm). */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_6_convnext_0_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225985031574965f, .offset= -179}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_dwconv_Pad, // Node Params 2, // Num Node
Params inputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose perm={0,2,1}: (1,196,512) -> (1,512,196), channel-major layout for the conv lowering that follows. */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225985031574965f, .offset= -179}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Reshape rank-3 (1,512,196) -> rank-4 (1,512,1,196): inserts a dummy H=1 axis so the 1-D depthwise conv can run as Conv2d. */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225985031574965f, .offset= -179}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose perm={0,2,3,1}: NCHW (1,512,1,196) -> NHWC (1,1,196,512), the layout DepthWiseConv2d consumes. */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225985031574965f, .offset= -179}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Static depthwise filter (1,5,1,512): a 5-tap kernel per each of the 512 channels, uint8 data resolved from the model binary blob via BINVARSTART/BINLEN. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0069155460223556f, .offset= -133}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* Static per-channel conv bias: rank-1 {512}, uint8 from the binary blob. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007753538084216f, .offset= -150}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* DepthWiseConv2d: stride {1,1}, dilation {1,1}, pad_amount all zero — the input was pre-padded (196), so the 5-tap kernel yields length 192 again. */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_stride, .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight", "tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023247539065778f, .offset= -102}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d, // Output
Tensors 1// Num Output Tensors ), err); return err; } /* Transpose perm={0,3,1,2}: NHWC (1,1,192,512) back to NCHW (1,512,1,192) after the conv. */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023247539065778f, .offset= -102}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Reshape rank-4 (1,512,1,192) -> rank-3 (1,512,192): drops the dummy H=1 axis after the 2-D lowering. */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023247539065778f, .offset= -102}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Transpose perm={0,2,1}: (1,512,192) back to the (1,192,512) feature-last layout used by the surrounding pointwise ops. */ static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023247539065778f, .offset= -102}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_Mul_1[] = { "_vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023247539065778f, .offset= -102}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_Mul_1, // Node Params 1, // 
Num Node Params inputs__vector_field_main_blocks_6_convnext_0_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035690322984010f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, 
// ---- (continuation) quantization/encoding + payload for the LayerNorm beta tensor ----
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015356119256467f, .offset= -170}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// LayerNorm over the last axis (axes={2}, epsilon=1e-6) of the masked (1,192,512) activations,
// with the gamma/beta static tensors as inputs 2 and 3; the output is re-quantized to
// scale 0.02197 / offset -148.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization[] = {
"_vector_field_main_blocks_6_convnext_0_norm_Transpose_output_0",
"tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight",
"tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219673067331314f, .offset= -148}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization", // Node Name
    "qti.aisw", // Package Name
    "LayerNorm", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization, // Node Params
    2, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,2,1}): back to channel-first (1,512,192) "ncf" layout after LayerNorm.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf[] = {
"_vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
// ---- (continuation) output tensor + node registration for the post-LayerNorm NCF Transpose ----
.id=0, .name= "_vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219673067331314f, .offset= -148}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Reshape: (1,512,192) -> (1,512,1,192), inserting a height-1 axis so the pointwise conv
// can be executed as a Conv2d on the backend.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219673067331314f, .offset= -148}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,2,3,1}): NCHW (1,512,1,192) -> NHWC (1,1,192,512) for the backend Conv2d.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219673067331314f, .offset= -148}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Static pwconv1 weight tensor, shape {1,1,512,1024} (1x1 kernel, 512 in / 1024 out channels),
// uint8, payload from the model binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0149883758276701f, .offset= -126}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Static pwconv1 bias tensor: 1024 uint8 values from the model binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t)
// ---- (continuation) pwconv1 bias tensor body ----
{ .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019457406597212f, .offset= -216}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Conv2d for pwconv1: stride {1,1}, dilation {1,1}, zero padding, group=1 —
// pointwise projection NHWC (1,1,192,512) -> (1,1,192,1024) using the weight/bias
// tensors registered above. 5 params: dilation, pad_amount, stride, group, reuse_sparse_indices.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d[] = {
"_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight",
"tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0464291572570801f, .offset= -149}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
// ---- (continuation) argument list closing the pwconv1 Conv2d node registration ----
    "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "Conv2d", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d, // Node Params
    5, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Transpose (perm {0,3,1,2}): NHWC conv output (1,1,192,1024) -> NCHW (1,1024,1,192).
static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0464291572570801f, .offset= -149}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Reshape: drop the height-1 axis, (1,1024,1,192) -> (1,1024,192) = pwconv1 output.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate[] = {
"_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0464291572570801f, .offset= -149}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// ElementWiseNeuron (operation code 1) on the pwconv1 output — the ConvNeXt activation
// (tensor names map to the ONNX act_Mul_1 node; confirm the op code against QnnOpDef.h).
static ModelError_t addNode__elementwiseneuron_12(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_12 */
Qnn_Param_t params__elementwiseneuron_12[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
const char* inputs__elementwiseneuron_12[] = {
"_vector_field_main_blocks_6_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_12[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199706722050905f, .offset= -9}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_12", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_12, // Node Params 1, // Num Node Params inputs__elementwiseneuron_12, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_12, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199706722050905f, .offset= -9}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", 
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199706722050905f, .offset= -9}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // 
Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0085662277415395f, .offset= -105}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias", // 
Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022898847237229f, .offset= -192}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0322449319064617f, .offset= -144}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0322449319064617f, .offset= -144}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate */ const char* 
inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0322449319064617f, .offset= -144}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t 
_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0322449319064617f, .offset= -144}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021308227442205f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_6_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_6_convnext_0_gamma", "_vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0080644506961107f, .offset= -79}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_6_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_Add */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_Add[] = { "_vector_field_main_blocks_6_convnext_0_Mul_output_0", "_vector_field_main_blocks_6_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0246391128748655f, .offset= -185}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_6_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_0_Mul_3[] = { "_vector_field_main_blocks_6_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0246391128748655f, .offset= -185}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_Mul(QnnModel& 
model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_Mul */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_Mul[] = { "_vector_field_main_blocks_6_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0246391128748655f, .offset= -185}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_6_convnext_1_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_6_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 4, 4, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad[] = { "_vector_field_main_blocks_6_convnext_1_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0[] = {1, 200, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0246391128748655f, 
.offset= -185}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 512, 200}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0246391128748655f, .offset= -185}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 200}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0246391128748655f, .offset= -185}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE 
FOR _vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 200, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0246391128748655f, .offset= -185}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0056706005707383f, .offset= -105}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight), 
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007280668360181f, .offset= -155}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_dilation[] = {1, 2}; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t 
_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight", "tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027473221998662f, .offset= -117}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= 
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027473221998662f, .offset= -117}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027473221998662f, .offset= -117}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc */ uint32_t 
dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027473221998662f, .offset= -117}}}, .rank= 3, 
.dimensions=dimensions__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_Mul_1[] = { "_vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0027473221998662f, .offset= -117}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036509109195322f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024417901877314f, .offset= -105}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_6_convnext_1_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight", "tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308291688561440f, .offset= -78}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308291688561440f, .offset= -78}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308291688561440f, .offset= -78}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0308291688561440f, .offset= -78}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0145748434588313f, .offset= -109}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018318408401683f, .offset= -218}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t 
dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_pad_amount, 
.dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight", "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0573830381035805f, .offset= -187}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0573830381035805f, .offset= -187}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params 
inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0573830381035805f, .offset= -187}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_14(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_14 */ Qnn_Param_t params__elementwiseneuron_14[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_14[] = { "_vector_field_main_blocks_6_convnext_1_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159959122538567f, .offset= -11}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_14", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_14, // Node Params 1, // Num Node Params inputs__elementwiseneuron_14, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_14, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t 
err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_1_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159959122538567f, .offset= -11}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t 
dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0159959122538567f, .offset= -11}}}, .rank= 4, 
.dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0093788644298911f, .offset= -94}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight), 
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033166850917041f, .offset= -205}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t 
_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight", "tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374460704624653f, .offset= -108}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
/* Register the pointwise conv as a Conv2d node (5 params, 3 inputs, 1 output). */
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose the conv output back from NHWC to NCHW (perm {0,3,1,2}):
   {1,1,192,512} -> {1,512,1,192}. Quant encoding is unchanged
   (scale 0.0374..., offset -108) since a transpose only reorders data. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374460704624653f, .offset= -108}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshape: drop the unit height axis, {1,512,1,192} -> {1,512,192}.
   Same uint8 encoding; Reshape takes no params. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate */
const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate[] = {
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374460704624653f, .offset= -108}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
),
err);
return err;
}

/* Transpose {0,2,1}: {1,512,192} -> {1,192,512}, i.e. channel-first to
   feature-last layout for the following elementwise ops. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc[] = {
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0374460704624653f, .offset= -108}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Static per-channel scale vector ("gamma") of convnext_1: shape {1,1,512},
   uint8 with scale 0.001528..., offset 0; payload comes from the weight
   blob via BINVARSTART/BINLEN on the next source line. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_1_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015286388806999f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_1_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* ElementWiseBinary with operation=13 — elementwise multiply here, matching
   the ONNX node name (_Mul_2): gamma {1,1,512} * pwconv2 output {1,192,512}. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_Mul_2 */
Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_Mul_2[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_1_Mul_2[] = {
"tts_ttl_vector_field_main_blocks_6_convnext_1_gamma",
"_vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_Mul_2_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_Mul_2[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0072241723537445f, .offset= -105}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_1_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_1_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_1_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_1_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* ElementWiseBinary with operation=0 — elementwise add here, matching the
   ONNX node name (_Add): residual sum of _Mul_output_0 and _Mul_2_output_0. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_Add */
Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_Add[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_1_Add[] = {
"_vector_field_main_blocks_6_convnext_1_Mul_output_0",
"_vector_field_main_blocks_6_convnext_1_Mul_2_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_Add_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_Add[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273092761635780f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_1_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_1_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_1_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_1_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Multiply (operation=13) by the graph input "latent_mask" — masks the
   convnext_1 block output; same quant encoding as the Add output. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_1_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_1_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_6_convnext_1_Mul_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_1_Mul_3[] = {
"_vector_field_main_blocks_6_convnext_1_Add_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_1_Mul_3_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_1_Mul_3[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_1_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273092761635780f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_1_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_1_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_1_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_1_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_1_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Entry multiply of convnext_2: applies "latent_mask" again (operation=13)
   to the previous block's masked output before the depthwise conv. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_Mul */
Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_Mul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_2_Mul[] = {
"_vector_field_main_blocks_6_convnext_1_Mul_3_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_Mul[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273092761635780f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Pad for the convnext_2 depthwise conv: rank-3 pad_amount {3,2} =
   {{0,0},{8,8},{0,0}} pads 8 on each side of axis 1 (192 -> 208);
   scheme=3 selects the pad scheme used by this converter for this node. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Pad */
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 8, 8, 0, 0};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_dwconv_Pad[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad[] = {
"_vector_field_main_blocks_6_convnext_2_Mul_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0[] = {1, 208, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273092761635780f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose {0,2,1}: feature-last {1,208,512} -> channel-first {1,512,208}
   ahead of the depthwise conv; encoding carried through unchanged. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf[] = {
"_vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 512, 208};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273092761635780f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshape: insert a unit height axis, {1,512,208} -> {1,512,1,208}, so the
   1-D depthwise conv can run as a 2-D conv. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 208};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273092761635780f, .offset= -192}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose {0,2,3,1}: NCHW {1,512,1,208} -> NHWC {1,1,208,512}, the layout
   the backend Conv ops consume. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 208, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0273092761635780f, .offset= -192}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Static depthwise kernel for convnext_2.dwconv: HWIO-style {1,5,1,512}
   (5 taps per channel, 512 channels), uint8, payload from the weight blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046149091795087f, .offset= -129}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* Static per-channel bias {512} for the depthwise conv (quantized uint8,
   8-bit bias per the converter's bias_bitwidth=8 setting in the header). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007286638137884f, .offset= -139}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* Depthwise conv node for convnext_2: dilation {1,4} widens the 5-tap kernel
   to a 17-sample receptive field along the sequence axis; no extra padding
   here (the explicit Pad node above already added 8+8), unit stride. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_dilation[] = {1, 4};
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset=
0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight", "tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046522896736860f, .offset= -79}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= 
{.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046522896736860f, .offset= -79}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape drops the singleton height axis inserted for the 2-D conv:
 * {1, 512, 1, 192} -> {1, 512, 192}. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046522896736860f, .offset= -79}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose perm {0, 2, 1}: {1, 512, 192} -> {1, 192, 512}
 * (channel-last "nfc" layout expected by the following elementwise op). */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046522896736860f, .offset= -79}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* ElementWiseBinary over (conv output, latent_mask). Scalar "operation"
 * code is 13 — presumably MULTIPLY given the source ONNX node name "Mul_1";
 * NOTE(review): confirm 13 against the ElementWiseBinary operation enum in
 * QnnOpDef.h. The output reuses the downstream name
 * _..._norm_Transpose_output_0 (the converter folded the transpose away). */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_Mul_1[] = { "_vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_Mul_1[] = { (Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046522896736860f, .offset= -79}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_Mul_1, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* LayerNorm gamma: static 512-element u8 tensor (scale/offset, offset 0). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024980793241411f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* LayerNorm beta: static 512-element u8 tensor backed by the weight blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029565363656729f, .offset= -98}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Adds the LayerNorm node: normalizes over axis 2 (the 512-wide last axis
 * of the {1, 192, 512} input), epsilon 1e-6, using the gamma/beta tensors
 * registered above. Output is requantized to scale 0.03508/offset -70. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization */ uint32_t
dimensions__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_6_convnext_2_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight", "tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350780598819256f, .offset= -70}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose perm {0, 2, 1}: {1, 192, 512} -> {1, 512, 192}
 * (channel-first "ncf" layout feeding the pointwise conv below). */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350780598819256f, .offset= -70}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape inserts a singleton height axis so the pointwise conv can run as
 * a 2-D conv: {1, 512, 192} -> {1, 512, 1, 192}. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350780598819256f, .offset= -70}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose to NHWC for the Conv2d below: perm {0, 2, 3, 1},
 * {1, 512, 1, 192} -> {1, 1, 192, 512}. */
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350780598819256f, .offset= -70}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Pointwise (1x1) conv weights: static {1, 1, 512, 1024} u8 tensor from the
 * weight blob (scale/offset quantized). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120834298431873f, .offset= -109}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Pointwise conv bias: static 1024-element u8 tensor (scale/offset). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019284952431917f, .offset= -227}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
static ModelError_t
addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, 
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight", "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias" }; uint32_t 
dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0595903322100639f, .offset= -190}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Transpose node: permutes the pwconv1 Conv2d output from NHWC {1, 1, 192, 1024} to NCHW {1, 1024, 1, 192} (perm {0, 3, 1, 2}).
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t
params__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0595903322100639f, .offset= -190}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Reshape node: drops the singleton H axis, {1, 1024, 1, 192} -> {1, 1024, 192}; quantization params carried through unchanged.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0595903322100639f, .offset= -190}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
// ElementWiseNeuron node (scalar "operation" = 1; see QnnOpDef for the op-code meaning) applied to the pwconv1 output.
// Implements the ONNX act/Mul_1 activation; output requantized to scale 0.01574, offset -11.
static ModelError_t addNode__elementwiseneuron_16(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_16 */ Qnn_Param_t params__elementwiseneuron_16[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_16[] = { "_vector_field_main_blocks_6_convnext_2_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_16[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0157448258250952f, .offset= -11}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_16", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_16, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_16, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_16, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Reshape node: {1, 1024, 192} -> {1, 1024, 1, 192}, adding a singleton axis so pwconv2 can run as Conv2d.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_2_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0157448258250952f, .offset= -11}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Transpose node: NCHW {1, 1024, 1, 192} -> NHWC {1, 1, 192, 1024} (perm {0, 2, 3, 1}) ahead of the pwconv2 Conv2d.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {
// Output tensor of the NHWC transpose: uint8 {1, 1, 192, 1024}, same scale/offset as its input.
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0157448258250952f, .offset= -11}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Static weight tensor for pwconv2: uint8 {1, 1, 1024, 512}, scale 0.0092445, offset -132; data comes from the model binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0092445136979222f, .offset= -132}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// Static bias tensor for pwconv2: uint8 {512}, scale 0.0015316, offset -169; data from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015315643977374f, .offset= -169}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// Conv2d node for pwconv2: same static-param layout as pwconv1 (stride {1, 1}, dilation {1, 1}, no padding, group=1).
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// "pad_amount" param: static uint32 tensor {2, 2} holding {0, 0, 0, 0} (no padding).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f,
.offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// "stride" param: static uint32 tensor {1, 1}.
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight", "tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t
// Output tensor list for the pwconv2 Conv2d node: uint8 NHWC {1, 1, 192, 512}, scale 0.0450944, offset -111.
outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0450943931937218f, .offset= -111}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Transpose node: permutes the pwconv2 Conv2d output from NHWC {1, 1, 192, 512} to NCHW {1, 512, 1, 192} (perm {0, 3, 1, 2}).
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0450943931937218f, .offset= -111}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Reshape node: drops the singleton H axis, {1, 512, 1, 192} -> {1, 512, 192}.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0450943931937218f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Transpose node: {1, 512, 192} -> {1, 192, 512} (perm {0, 2, 1}), back to feature-last layout for the elementwise ops below.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0"
}; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0450943931937218f, .offset= -111}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Static gamma tensor (ConvNeXt layer-scale): uint8 {1, 1, 512}, scale 0.0014721, offset 0; data from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_2_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_2_gamma", .type=
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014720971230417f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_2_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_2_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_2_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// ElementWiseBinary node (scalar "operation" = 13, the converter's op-code for Mul nodes): gamma {1, 1, 512} broadcast-multiplied with the pwconv2 output {1, 192, 512}.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_Mul_2[] = { "tts_ttl_vector_field_main_blocks_6_convnext_2_gamma", "_vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083009609952569f, .offset= -107}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_2_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_2_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_2_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_2_Mul_2, // Output Tensors
1// Num Output Tensors
), err); return err; }
// ElementWiseBinary node (scalar "operation" = 0, the converter's op-code for Add nodes): residual add of Mul_output_0 and the layer-scaled branch Mul_2_output_0.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_Add */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_Add[] = { "_vector_field_main_blocks_6_convnext_2_Mul_output_0", "_vector_field_main_blocks_6_convnext_2_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275711007416248f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr,
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_2_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_2_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_2_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_2_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_2_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_2_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_2_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_2_Mul_3[] = { "_vector_field_main_blocks_6_convnext_2_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_2_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_2_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_2_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275711007416248f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_2_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_2_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_2_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_2_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_2_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_Mul */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_Mul[] = { "_vector_field_main_blocks_6_convnext_2_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275711007416248f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
/*
 * Generated code (qnn-onnx-converter) -- do not hand-edit; regenerate instead.
 * Finishes registering ..._convnext_3_Mul, then prepares block 3's depthwise
 * convolution:
 *   - Pad: pad_amount {{0,0},{16,16},{0,0}} widens the time axis 192 -> 224.
 *     16 per side matches (kernel 5 - 1) * dilation 8 / 2 for the dilated conv
 *     added further below. scheme=3 selects one of the QNN_OP_PAD_SCHEME_*
 *     enum values -- confirm which against QnnOpDef.h.
 *   - Transpose perm {0,2,1}: feature-last [1,224,512] -> channel-first
 *     [1,512,224] ("ncf" suffix).
 *   - Reshape to rank-4 [1,512,1,224], inserting a dummy height axis so the
 *     1-D conv can execute as a 2-D op.
 */
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_3_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_3_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_3_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_3_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 16, 16, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value =
3}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_dwconv_Pad[] = { "_vector_field_main_blocks_6_convnext_3_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0[] = {1, 224, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275711007416248f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_3_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_3_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_3_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_3_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t
params__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 512, 224}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275711007416248f, .offset= -192}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
/*
 * Generated code (qnn-onnx-converter) -- do not hand-edit; regenerate instead.
 * Registers the rank-4 Reshape prepared above, then a Transpose with perm
 * {0,2,3,1} (NCHW -> NHWC, the converter's spatial-first layout), and the two
 * static tensors for the block-3 depthwise convolution:
 *   - weight [1,5,1,512]: 5-tap kernel per channel, uint8 scale/offset
 *     quantized, payload embedded via BINVARSTART/BINLEN.
 *   - bias [512]: uint8 scale/offset quantized.
 * Ends inside the DepthWiseConv2d parameter list with the "dilation" tensor
 * param {1,8} (dilation 8 along the time axis).
 */
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char*
inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 224, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0275711007416248f, .offset= -192}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050801350735128f, .offset= -150}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007901859353296f, .offset= -166}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_dilation[] = {1, 8}; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR,
/*
 * Generated code (qnn-onnx-converter) -- do not hand-edit; regenerate instead.
 * Finishes the DepthWiseConv2d parameters -- pad_amount {{0,0},{0,0}} (zero,
 * because the explicit Pad node upstream already applied the 16/16 padding)
 * and stride {1,1} -- then registers the conv node: NHWC input [1,1,224,512],
 * output [1,1,192,512] (224 - (5-1)*8 = 192 with dilation 8). Afterwards a
 * Transpose perm {0,3,1,2} returns the result to NCHW [1,512,1,192], and the
 * start of the Reshape that will drop the dummy height axis follows.
 */
.name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight", "tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias" }; uint32_t
dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044866222888231f, .offset= -96}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t
params__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044866222888231f, .offset= -96}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044866222888231f, .offset= -96}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= 
(Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_Mul_1[] = { "_vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044866222888231f, .offset= -96}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight", // Tensor Name 
/* [generated code - review notes] Static LayerNorm affine params for block 6.convnext.3:
 * gamma ("...norm_norm_weight", shape {512}) whose addTensor call begins on the previous
 * line, then beta ("...norm_norm_bias", shape {512}). Both are uint8 scale-offset quantized
 * and reference their payload in the external weight blob via BINVARSTART/BINLEN. */
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032312844414264f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030414203647524f, .offset= -116}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_6_convnext_3_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight", "tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias" }; uint32_t
/* LayerNorm node: axes tensor = {2} (normalize over the trailing 512-feature dim of the
 * 1x192x512 features-last input), epsilon = 1e-6; inputs are {activation, gamma, beta}.
 * Output quant encoding changes here (scale 0.03908..., offset -70) because normalization
 * rescales the value range. */
dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0390804149210453f, .offset= -70}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t
/* [generated code - review notes] Layout plumbing so a 1x1 Conv2d can implement the ONNX
 * pointwise Conv: Transpose perm={0,2,1} back to channels-first (1x512x192), Reshape to
 * rank-4 NCHW (1,512,1,192), then Transpose perm={0,2,3,1} to NHWC (1,1,192,512). All three
 * are pure layout ops, so the quant scale/offset pass through unchanged. */
params__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0390804149210453f, .offset= -70}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0390804149210453f, .offset= -70}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0390804149210453f, .offset= -70}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t
/* [generated code - review notes] pwconv1 = pointwise expansion 512 -> 1024 channels.
 * Weight is a 1x1 kernel, shape {1,1,512,1024} (H,W,Cin,Cout), bias {1024}; both static
 * uint8 tensors whose payloads come from the external weight blob (BINVARSTART/BINLEN).
 * The Conv2d node below runs on the NHWC input with stride/dilation {1,1}, zero padding,
 * group=1, producing (1,1,192,1024). */
dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0187172964215279f, .offset= -118}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024945822078735f, .offset= -214}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam=
(Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight", "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0611503645777702f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
/* [generated code - review notes] Tail of the NHWC->NCHW Transpose (perm {0,3,1,2},
 * output (1,1024,1,192)), then a Reshape dropping the unit H dim back to 3-D (1,1024,192),
 * then node "_elementwiseneuron_18": ElementWiseNeuron with operation=1 applied to the
 * pwconv1 output. Its result tensor is named "..._act_Mul_1_output_0", so this is
 * presumably the converter's fused activation (the ONNX act Mul chain) - confirm operation
 * code 1 against QNN_OP_ELEMENT_WISE_NEURON_OPERATION_* in QnnOpDef.h. */
{.scaleOffsetEncoding= {.scale= 0.0611503645777702f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0611503645777702f,
.offset= -147}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_18(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_18 */ Qnn_Param_t params__elementwiseneuron_18[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_18[] = { "_vector_field_main_blocks_6_convnext_3_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_18[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0266130827367306f, .offset= -6}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_18", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_18, // Node Params 1, // Num Node Params inputs__elementwiseneuron_18, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_18, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_6_convnext_3_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0266130827367306f, .offset= -6}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, //
Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0266130827367306f, .offset= -6}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight[] = {1, 1, 1024, 512}; 
// Register the quantized pwconv2 weight blob with the graph.
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight",
        .type= QNN_TENSOR_TYPE_STATIC, // constant data baked into the model
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0138118863105774f, .offset= -131}}},
        .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
), err);
return err;
}

// Static bias tensor for pwconv2: uint8[512], affine-quantized, loaded from the
// model binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0010402387706563f, .offset= -130}}},
        .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
), err);
return err;
}

// Conv2d node implementing the pwconv2 1x1 convolution (1024 -> 512 channels).
// Params declared here: dilation {1,1}; pad_amount/stride/group/etc. follow below.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; // no spatial padding (1x1 kernel)
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d[] = {
    // "dilation" parameter: {1,1}.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_dilation",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_dilation,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_dilation,
                       .dataSize=8}}, // 2 x uint32
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
// "pad_amount" parameter: 2x2 zeros -- no spatial padding for the pointwise conv.
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
 {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_pad_amount",
    .type= QNN_TENSOR_TYPE_STATIC,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
        {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 2, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_pad_amount,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_pad_amount,
                   .dataSize=16}}, // 4 x uint32
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}},
// "stride" parameter: {1,1}.
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride",
 {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_stride",
    .type= QNN_TENSOR_TYPE_STATIC,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
        {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_stride,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d_stride,
                   .dataSize=8}}, // 2 x uint32
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}},
// Scalar params: ordinary (non-grouped) convolution; sparse-index reuse disabled.
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
// Inputs: NHWC activation, static weight, static bias. Output: (1,1,192,512) uint8.
const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d[] = {
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight",
    "tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0365706458687782f, .offset= -125}}},
        .rank= 4, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "Conv2d", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d, // Node Params
    5, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Transpose node: permutes the Conv2d output with perm {0,3,1,2}
// (channel-last -> channel-first), (1,1,192,512) -> (1,512,1,192).
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; // NHWC -> NCHW axis order
Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
    // "perm" parameter: static uint32[4] axis permutation.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw_perm,
                       .dataSize=16}}, // 4 x uint32
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        // Quant encoding is identical to the Transpose input (layout ops preserve values).
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0365706458687782f, .offset= -125}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Reshape node: collapses the NCHW tensor (1,512,1,192) back to the 3-D layout
// (1,512,192) expected by the rest of the ConvNeXt block.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate */
const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate[] = {
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0365706458687782f, .offset= -125}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Transpose node: perm {0,2,1} swaps the channel and frame axes,
// (1,512,192) -> (1,192,512), i.e. channel-first to feature-last ("nfc").
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc[] = {
    // "perm" parameter: static uint32[3] axis permutation.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= {
            .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc_perm,
            .dataSize=12}}, // 3 x uint32
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc[] = {
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0365706458687782f, .offset= -125}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Head of the next function (its name continues on the following line).
static ModelError_t
// Static per-channel scale ("gamma", layer-scale style) tensor: uint8 (1,1,512),
// offset 0; payload loaded from the model binary blob.
addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_6_convnext_3_gamma", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "tts_ttl_vector_field_main_blocks_6_convnext_3_gamma",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0030636650044471f, .offset= 0}}},
        .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_6_convnext_3_gamma,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_6_convnext_3_gamma),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_6_convnext_3_gamma)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
), err);
return err;
}

// ElementWiseBinary node (operation=13, multiply): gamma * pwconv2 output,
// broadcasting gamma (1,1,512) over (1,192,512).
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_Mul_2 */
Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_3_Mul_2[] = {
    "tts_ttl_vector_field_main_blocks_6_convnext_3_gamma",
    "_vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_Mul_2_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name=
"_vector_field_main_blocks_6_convnext_3_Mul_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0151488045230508f, .offset= -143}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_Mul_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_Mul_2", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_3_Mul_2, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_Mul_2, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_Mul_2, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=0, add): residual-style sum of Mul_output_0
// and the gamma-scaled branch Mul_2_output_0.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_Add */
Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_3_Add[] = {
    "_vector_field_main_blocks_6_convnext_3_Mul_output_0",
    "_vector_field_main_blocks_6_convnext_3_Mul_2_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_Add_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name=
"_vector_field_main_blocks_6_convnext_3_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0288693439215422f, .offset= -189}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_Add", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_3_Add, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_Add, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_Add, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=13, multiply): applies the "latent_mask"
// graph input to Add_output_0. Output keeps the same quant encoding as its input.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_Mul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_3_Mul_3[] = {
    "_vector_field_main_blocks_6_convnext_3_Add_output_0",
    "latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_Mul_3_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_Mul_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_Mul_3_output_0",
        .type=
QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0288693439215422f, .offset= -189}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_Mul_3_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_Mul_3", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_3_Mul_3, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_Mul_3, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_Mul_3, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Transpose node: perm {0,2,1}, (1,192,512) -> (1,512,192) ("ncf" layout)
// ahead of the next block's residual add.
static ModelError_t addNode__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding=
{.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf_perm,
                       .dataSize=12}}, // 3 x uint32
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf[] = {
    "_vector_field_main_blocks_6_convnext_3_Mul_3_output_0"
};
uint32_t dimensions__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0288693439215422f, .offset= -189}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf,
    // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=0, add): block-7 residual sum of the
// block-6 ConvNeXt output (ncf) and "_vector_field_main_blocks_7_Transpose_output_0".
static ModelError_t addNode__vector_field_main_blocks_7_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_7_Add */
Qnn_Param_t params__vector_field_main_blocks_7_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_7_Add[] = {
    "_vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf",
    "_vector_field_main_blocks_7_Transpose_output_0"
};
uint32_t dimensions__vector_field_main_blocks_7_Add_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_7_Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_7_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_7_Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_7_Add", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_main_blocks_7_Add, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_7_Add, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_7_Add, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Transpose node: perm {0,2,1}, (1,512,192) -> (1,192,512) ("nfc" layout).
static ModelError_t addNode__vector_field_main_blocks_7_Add_output_0_nfc(QnnModel& model){
ModelError_t err =
MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_7_Add_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_7_Add_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_7_Add_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_7_Add_output_0_nfc[] = {
    // "perm" parameter: static uint32[3] axis permutation.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_7_Add_output_0_nfc_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__vector_field_main_blocks_7_Add_output_0_nfc_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_7_Add_output_0_nfc_perm,
                       .dataSize=12}}, // 3 x uint32
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_7_Add_output_0_nfc[] = {
    "_vector_field_main_blocks_7_Add_output_0"
};
uint32_t dimensions__vector_field_main_blocks_7_Add_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_7_Add_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_7_Add_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_7_Add_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_7_Add_output_0_nfc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_main_blocks_7_Add_output_0_nfc, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_7_Add_output_0_nfc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_main_blocks_7_Add_output_0_nfc, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=13, multiply): masks the block-7 residual
// sum with the "latent_mask" graph input.
static ModelError_t addNode__vector_field_main_blocks_7_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_7_Mul */
Qnn_Param_t params__vector_field_main_blocks_7_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_7_Mul[] = {
    "_vector_field_main_blocks_7_Add_output_0_nfc",
    "latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_7_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_7_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_7_Mul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_7_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t
// Op_Config_t Version (continuation of the addNode() call above)
"_vector_field_main_blocks_7_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_7_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_7_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_7_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=13, multiply): applies "latent_mask" again
// at the entry of block 8's ConvNeXt stage. Quant encoding matches the input.
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_Mul */
Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_8_convnext_0_Mul[] = {
    "_vector_field_main_blocks_7_Mul_output_0",
    "latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_8_convnext_0_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_8_convnext_0_Mul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}},
        .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_main_blocks_8_convnext_0_Mul", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__vector_field_main_blocks_8_convnext_0_Mul, // Node Params
    1, // Num Node Params
    inputs__vector_field_main_blocks_8_convnext_0_Mul, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__vector_field_main_blocks_8_convnext_0_Mul, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Pad node ahead of block 8's depthwise conv: pads the middle (frame) axis by
// 2 on each side, (1,192,512) -> (1,196,512); scheme=3 (see QNN Pad op docs).
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Pad */
uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; // (before,after) per axis
Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_dwconv_Pad[] = {
    // "pad_amount" parameter: static uint32[3][2] tensor.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Pad_pad_amount",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 2, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_pad_amount,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Pad_pad_amount,
                       .dataSize=24}}, // 6 x uint32
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad[] = {
    "_vector_field_main_blocks_8_convnext_0_Mul_output_0"
};
uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // 
Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0300718564540148f, .offset= -185}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047682402655482f, .offset= -140}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011977187823504f, .offset= -80}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d(QnnModel& model){ 
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, 
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight", "tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0091555546969175f, .offset= -120}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0091555546969175f, .offset= -120}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // 
Num Node Params inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0091555546969175f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0091555546969175f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_Mul_1[] = { "_vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0091555546969175f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037887489888817f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021448701154441f, .offset= -76}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization */ uint32_t 
dimensions__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_8_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight", "tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0375283509492874f, .offset= -172}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0375283509492874f, .offset= -172}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
params__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0375283509492874f, .offset= -172}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; 
Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0375283509492874f, .offset= -172}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190949998795986f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021459544077516f, .offset= -227}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, 
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias" }; uint32_t 
dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0546567589044571f, .offset= -173}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t 
params__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0546567589044571f, .offset= -173}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0546567589044571f, .offset= -173}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_20(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_20 */ Qnn_Param_t params__elementwiseneuron_20[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_20[] = { "_vector_field_main_blocks_8_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_20[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0182111728936434f, .offset= -9}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_20", // Node Name "qti.aisw", 
// Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_20, // Node Params 1, // Num Node Params inputs__elementwiseneuron_20, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_20, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_8_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0182111728936434f, .offset= -9}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num 
Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0182111728936434f, .offset= -9}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0165382493287325f, .offset= -132}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013767746277153f, .offset= -123}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d */
/* Conv hyper-parameters: dilation {1,1}, pad_amount all zero, stride {1,1} —
 * i.e. a 1x1 pointwise convolution. dataSize values are byte counts
 * (2 x uint32 = 8, 4 x uint32 = 16). */
uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1};
/* Five node params: three static uint32 tensor params (dilation, pad_amount,
 * stride) and two scalars (group=1, reuse_sparse_indices=false). */
Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
/* Inputs: NHWC activation (reshaped to 4-D upstream), static weight, static
 * bias. Output is NHWC [1,1,192,512], uint8 quantized. */
const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0625165700912476f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose the conv output from NHWC to NCHW (perm {0,3,1,2}). */
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
/* Transpose output: [1,1,192,512] NHWC -> [1,512,1,192] NCHW; quantization
 * params carried over unchanged from the conv output. */
const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0625165700912476f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Squeeze the singleton H dimension: reshape [1,512,1,192] -> [1,512,192]. */
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0625165700912476f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose [1,512,192] -> [1,192,512] (perm {0,2,1}): channel-last layout
 * expected by the following elementwise ops. */
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0" 
}; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0625165700912476f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers the static [1,1,512] gamma tensor of ConvNeXt block 8
 * (per converter naming, the layer-scale parameter — confirm against the
 * source model). uint8 quantized with offset 0 (values non-negative). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_8_convnext_0_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_8_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036242078058422f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_8_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_8_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_8_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* ElementWiseBinary gamma * conv output (broadcast over [1,192,512]).
 * operation=13 is presumably the multiply opcode of the ElementWiseBinary
 * operation enum — inferred from the "Mul" node name; see QnnOpDef.h. */
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_8_convnext_0_gamma", "_vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0219416711479425f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_8_convnext_0_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_8_convnext_0_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_8_convnext_0_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_8_convnext_0_Mul_2, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Residual add of the block: Mul_output_0 + Mul_2_output_0 -> [1,192,512].
 * operation=0 is presumably the add opcode — inferred from the "Add" node
 * name; see QnnOpDef.h. */
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_Add */ Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_Add[] = { "_vector_field_main_blocks_8_convnext_0_Mul_output_0", "_vector_field_main_blocks_8_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0368742346763611f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_8_convnext_0_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_8_convnext_0_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_8_convnext_0_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_8_convnext_0_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Mask the block output with the graph input "latent_mask" (broadcast
 * multiply over [1,192,512]); output keeps the Add tensor's quantization. */
static ModelError_t addNode__vector_field_main_blocks_8_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_8_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_8_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_8_convnext_0_Mul_3[] = { "_vector_field_main_blocks_8_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_8_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_8_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_8_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0368742346763611f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_8_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_8_convnext_0_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_8_convnext_0_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_8_convnext_0_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_8_convnext_0_Mul_3, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Block-9 entry: multiply the (already masked) block-8 output by
 * "latent_mask" again. The output is named ..._Transpose_output_0 — a
 * converter-generated name (the original transpose was apparently folded). */
static ModelError_t addNode__vector_field_main_blocks_9_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_Mul */ Qnn_Param_t params__vector_field_main_blocks_9_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_9_Mul[] = { "_vector_field_main_blocks_8_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_9_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0368742346763611f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_9_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Flatten [1,192,512] -> [192,512] so the query projection can run as a
 * rank-2 FullyConnected. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape[] = { "_vector_field_main_blocks_9_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0368742346763611f, .offset= -162}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Static [256,512] weight of the block-9 attention query projection
 * (ONNX initializer onnx::MatMul_3146), uint8 quantized. */
static ModelError_t addTensor_onnx__MatMul_3146(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3146[] = {256, 512}; VALIDATE(model.addTensor("onnx__MatMul_3146", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3146", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0111485216766596f, .offset= -125}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3146, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3146), .dataSize=BINLEN(onnx__MatMul_3146)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Static 256-element bias of the query projection, uint8 quantized. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043090460821986f, .offset= -147}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Query projection as FullyConnected: [192,512] x weight [256,512] (+ bias)
 * -> [192,256]; the ONNX MatMul+Add pair is fused into one node. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_W_query_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_query_linear_MatMul */ const char* inputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul[] = { "_vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape", "onnx__MatMul_3146", "tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_9_attn_W_query_linear_Add_output_0_fc[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_W_query_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Restore the batch dimension: reshape [192,256] -> [1,192,256]. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_9_attn_W_query_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_9_attn_W_query_linear_Add_output_0[] = {1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_W_query_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Split the projected [1,192,256] tensor along axis 2 at indices
 * {64,128,192} into four [1,192,64] slices (per-head partitions of the
 * attention projection). All slices keep the parent quantization. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Split(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Split */ uint32_t dimensions__vector_field_main_blocks_9_attn_Split_split_index[] = {3}; uint32_t _vector_field_main_blocks_9_attn_Split_split_index[] = {64, 128, 192}; Qnn_Param_t params__vector_field_main_blocks_9_attn_Split[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Split_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Split[] = { "_vector_field_main_blocks_9_attn_W_query_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Split_output_0[] = {1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_9_attn_Split_output_1[] = {1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_9_attn_Split_output_2[] = {1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_9_attn_Split_output_3[] = {1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Split[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Split", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Split, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Split, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Split, // Output Tensors
4// Num Output Tensors
), err); return err; }
/* ONNX Unsqueeze lowered as Reshape: [1,192,64] -> [1,1,192,64]
 * (inserts the head axis for slice 0). */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze */ const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze[] = { "_vector_field_main_blocks_9_attn_Split_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_output_0[] = {1, 1, 192, 64}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_9_attn_Unsqueeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Unsqueeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_9_attn_Unsqueeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Unsqueeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_1 */ const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_1[] = { "_vector_field_main_blocks_9_attn_Split_output_1" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_1_output_0[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Unsqueeze_1", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_9_attn_Unsqueeze_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Unsqueeze_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_2 */ const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_2[] = { "_vector_field_main_blocks_9_attn_Split_output_2" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_2_output_0[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Unsqueeze_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Unsqueeze_2", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_9_attn_Unsqueeze_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Unsqueeze_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Unsqueeze_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Unsqueeze_3 */ const char* inputs__vector_field_main_blocks_9_attn_Unsqueeze_3[] = { "_vector_field_main_blocks_9_attn_Split_output_3" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Unsqueeze_3_output_0[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Unsqueeze_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Unsqueeze_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Unsqueeze_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Unsqueeze_3", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // 
Num Node Params inputs__vector_field_main_blocks_9_attn_Unsqueeze_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Unsqueeze_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Concat(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Concat */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Concat[] = { "_vector_field_main_blocks_9_attn_Unsqueeze_output_0", "_vector_field_main_blocks_9_attn_Unsqueeze_1_output_0", "_vector_field_main_blocks_9_attn_Unsqueeze_2_output_0", "_vector_field_main_blocks_9_attn_Unsqueeze_3_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Concat_output_0[] = {4, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type 
params__vector_field_main_blocks_9_attn_Concat, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Concat, // Input Tensor Names 4, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Concat, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Slice_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Slice_1 */ uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_1_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_9_attn_Slice_1_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 0, 32, 1}; Qnn_Param_t params__vector_field_main_blocks_9_attn_Slice_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Slice_1_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_1_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Slice_1_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", 
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Slice_1[] = { "_vector_field_main_blocks_9_attn_Concat_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_1_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Slice_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Slice_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Slice_1", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_9_attn_Slice_1, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_9_attn_Slice_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Slice_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Slice_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Slice_2 */ uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_2_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_9_attn_Slice_2_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 32, 64, 1}; Qnn_Param_t params__vector_field_main_blocks_9_attn_Slice_2[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Slice_2_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_2_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Slice_2_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Slice_2[] = { "_vector_field_main_blocks_9_attn_Concat_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Slice_2_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Slice_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Slice_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3311906754970551f, 
.offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Slice_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Slice_2", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_9_attn_Slice_2, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_9_attn_Slice_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Slice_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Mul_3[] = { "_vector_field_main_blocks_9_attn_Slice_1_output_0", "_vector_field_main_blocks_3_attn_Cos_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Mul_3_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3197596371173859f, .offset= -121}}}, .rank= 4, 
.dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_4 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Mul_4[] = { "_vector_field_main_blocks_9_attn_Slice_2_output_0", "_vector_field_main_blocks_3_attn_Sin_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Mul_4_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Mul_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1359950006008148f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_4_output_0, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Mul_4", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Mul_4, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Mul_4, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Mul_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_5 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_5[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Mul_5[] = { "_vector_field_main_blocks_9_attn_Slice_1_output_0", "_vector_field_main_blocks_3_attn_Sin_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Mul_5_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Mul_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2833542823791504f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Mul_5", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Mul_5, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Mul_5, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Mul_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_6(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_6 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_6[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Mul_6[] = { "_vector_field_main_blocks_9_attn_Slice_2_output_0", "_vector_field_main_blocks_3_attn_Cos_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Mul_6_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Mul_6_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1429921686649323f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Mul_6_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Mul_6", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Mul_6, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Mul_6, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Mul_6, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Sub(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Sub */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Sub[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Sub[] = { "_vector_field_main_blocks_9_attn_Mul_3_output_0", "_vector_field_main_blocks_9_attn_Mul_4_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Sub_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Sub[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Sub_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3288011550903320f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Sub_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t 
Version "_vector_field_main_blocks_9_attn_Sub", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Sub, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Sub, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Sub, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Add_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Add_1 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Add_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Add_1[] = { "_vector_field_main_blocks_9_attn_Mul_5_output_0", "_vector_field_main_blocks_9_attn_Mul_6_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Add_1_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Add_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Add_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2840076684951782f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Add_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Add_1", // Node Name "qti.aisw", // Package Name 
"ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_9_attn_Add_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Add_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Add_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_Concat_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_Concat_3 */ Qnn_Param_t params__vector_field_main_blocks_9_attn_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_9_attn_Concat_3[] = { "_vector_field_main_blocks_9_attn_Sub_output_0", "_vector_field_main_blocks_9_attn_Add_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_Concat_3_output_0[] = {4, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Concat_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3288011550903320f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_Concat_3", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_9_attn_Concat_3, // 
Node Params 1, // Num Node Params inputs__vector_field_main_blocks_9_attn_Concat_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_9_attn_Concat_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_9_attn_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_9_attn_MatMul */ Qnn_Param_t params__vector_field_main_blocks_9_attn_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_9_attn_MatMul[] = { "_vector_field_main_blocks_9_attn_Concat_3_output_0", "_vector_field_main_blocks_9_attn_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_9_attn_MatMul_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 7.0574264526367188f, .offset= -151}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_9_attn_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type 
params__vector_field_main_blocks_9_attn_MatMul, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_attn_MatMul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_MatMul, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Registers the attention-score scaling divide: MatMul output / a constant
 * (ElementWiseBinary with scalar param "operation" = 2; node name indicates Divide —
 * the enum meaning comes from the qti.aisw op package). Output is asymmetric uint8,
 * shape {4,1,192,128} (heads, batch, latent_len, text_len — presumed; confirm vs model). */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Div_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Div_4 */
Qnn_Param_t params__vector_field_main_blocks_9_attn_Div_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
/* Divisor is a shared constant produced by block 3 (constants are de-duplicated across blocks). */
const char* inputs__vector_field_main_blocks_9_attn_Div_4[] = { "_vector_field_main_blocks_9_attn_MatMul_output_0", "_vector_field_main_blocks_3_attn_Constant_39_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_Div_4_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Div_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Div_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.4410891532897949f, .offset= -151}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Div_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Div_4", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Div_4, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Div_4, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Div_4, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Applies the attention mask via select: where(cond, fill_const, scores).
 * NOTE(review): the output scale ~1.33e36 with offset -255 suggests the masked
 * fill value is a huge negative number (pre-softmax "-inf" substitute), which
 * saturates the uint8 range — verify encodings if accuracy issues appear here. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Where(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Where */
const char* inputs__vector_field_main_blocks_9_attn_Where[] = { "_vector_field_main_blocks_21_attn_Cast_2_output_0", "_vector_field_main_blocks_3_attn_Constant_42_output_0", "_vector_field_main_blocks_9_attn_Div_4_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_Where_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 1334440575053054352202761503860850688.0000000000000000f, .offset= -255}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Where", // Node Name
"qti.aisw", // Package Name
"ElementWiseSelect", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Where, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Where, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Softmax over the last axis (axis=3, i.e. the 128-wide text dimension) with beta=1.
 * Output quantization is the canonical softmax range: scale 1/256, offset 0. */
static ModelError_t
addNode__vector_field_main_blocks_9_attn_Softmax(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Softmax */
Qnn_Param_t params__vector_field_main_blocks_9_attn_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} };
const char* inputs__vector_field_main_blocks_9_attn_Softmax[] = { "_vector_field_main_blocks_9_attn_Where_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_Softmax_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Softmax", // Node Name
"qti.aisw", // Package Name
"Softmax", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Softmax, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Softmax, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Softmax, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Post-softmax mask select (Where_1): zero out attention weights outside the mask. */
static ModelError_t
addNode__vector_field_main_blocks_9_attn_Where_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Where_1 */
/* Select(condition, fill_const, softmax_probs); condition comes from a Cast in block 23
 * (shared/de-duplicated mask tensor). Output keeps the softmax-like 0..1 range
 * (scale ~1/255, offset 0). */
const char* inputs__vector_field_main_blocks_9_attn_Where_1[] = { "_vector_field_main_blocks_23_attention_Cast_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0", "_vector_field_main_blocks_9_attn_Softmax_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_Where_1_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Where_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Where_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Where_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Where_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseSelect", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Where_1, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Where_1, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Attention-weighted value aggregation: MatMul(weights {4,1,192,128}, V {presumably 4,1,128,64})
 * with no transposes -> per-head context {4,1,192,64}. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_MatMul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_MatMul_1 */
Qnn_Param_t params__vector_field_main_blocks_9_attn_MatMul_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_9_attn_MatMul_1[] = { "_vector_field_main_blocks_9_attn_Where_1_output_0", "_vector_field_main_blocks_9_attn_Concat_2_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_MatMul_1_output_0[] = {4, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0818345472216606f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_MatMul_1", // Node Name
"qti.aisw", // Package Name
"MatMul", // Qnn Node Type
params__vector_field_main_blocks_9_attn_MatMul_1, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_attn_MatMul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_MatMul_1, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Splits the 4-head context tensor along axis 0 into four {1,1,192,64} head tensors
 * (split points {1,2,3}); the heads are re-joined on the feature axis by Concat_5 below. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Split_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Split_3 */
/* "split_index" tensor param: 3 split points -> 4 output slices along axis 0. */
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_3_split_index[] = {3};
uint32_t _vector_field_main_blocks_9_attn_Split_3_split_index[] = {1, 2, 3};
Qnn_Param_t params__vector_field_main_blocks_9_attn_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_attn_Split_3_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_9_attn_Split_3[] = { "_vector_field_main_blocks_9_attn_MatMul_1_output_0" };
/* All four outputs keep the producer's encoding (scale/offset identical to MatMul_1 output). */
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_3_output_0[] = {1, 1, 192, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_3_output_1[] = {1, 1, 192, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_3_output_2[] = {1, 1, 192, 64};
uint32_t dimensions__vector_field_main_blocks_9_attn_Split_3_output_3[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0818345472216606f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0818345472216606f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Split_3_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0818345472216606f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_3_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_9_attn_Split_3_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0818345472216606f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Split_3_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Split_3", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Split_3, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Split_3, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Split_3, // Output Tensors
4// Num Output Tensors
), err); return err; }
/* Re-concatenates the four 64-wide head slices along axis 3 -> {1,1,192,256}
 * (multi-head "merge heads" expressed as Split + Concat by the converter). */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Concat_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Concat_5 */
Qnn_Param_t params__vector_field_main_blocks_9_attn_Concat_5[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__vector_field_main_blocks_9_attn_Concat_5[] = { "_vector_field_main_blocks_9_attn_Split_3_output_0", "_vector_field_main_blocks_9_attn_Split_3_output_1", "_vector_field_main_blocks_9_attn_Split_3_output_2", "_vector_field_main_blocks_9_attn_Split_3_output_3" };
uint32_t dimensions__vector_field_main_blocks_9_attn_Concat_5_output_0[] = {1, 1, 192, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Concat_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2,
{.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_Concat_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0818345472216606f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_9_attn_Concat_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Concat_5", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Concat_5, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Concat_5, // Input Tensor Names
4, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Concat_5, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* ONNX Squeeze lowered to a Reshape: {1,1,192,256} -> {192,256}, pre-flattening
 * the merged-head context for the FullyConnected output projection. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Squeeze(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Squeeze */
const char* inputs__vector_field_main_blocks_9_attn_Squeeze[] = { "_vector_field_main_blocks_9_attn_Concat_5_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_pre_reshape[] = {192, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding=
{.scale= 0.0818345472216606f, .offset= -116}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Squeeze", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Squeeze, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Squeeze, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Static weight tensor {512,256} for the attention output projection; quantized
 * uint8 data is linked in from the model binary via BINVARSTART/BINLEN. */
static ModelError_t addTensor_onnx__MatMul_3155(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_onnx__MatMul_3155[] = {512, 256};
VALIDATE(model.addTensor("onnx__MatMul_3155", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3155", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0096853189170361f, .offset= -109}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3155, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3155), .dataSize=BINLEN(onnx__MatMul_3155)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Static bias tensor {512} for the attention output projection (binary-blob backed). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias[] =
{512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023061716929078f, .offset= -138}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Attention output projection: ONNX MatMul+Add fused into FullyConnected
 * (inputs: activations {192,256}, weight {512,256}, bias {512}) -> {192,512}. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_out_fc_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_out_fc_linear_MatMul */
const char* inputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul[] = { "_vector_field_main_blocks_9_attn_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3155", "tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias" };
uint32_t dimensions__vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0_fc[] = {192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale=
0.2794734239578247f, .offset= -129}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_out_fc_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Restores the batch dim after FullyConnected: {192,512} -> {1,192,512};
 * same encoding as the FC output. */
static ModelError_t addNode__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape */
const char* inputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0_fc" };
uint32_t dimensions__vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2794734239578247f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf=
{ .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Masks the projected attention output with the graph input "latent_mask"
 * (ElementWiseBinary, operation id 13; node name indicates Multiply). */
static ModelError_t addNode__vector_field_main_blocks_9_attn_Mul_14(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_attn_Mul_14 */
Qnn_Param_t params__vector_field_main_blocks_9_attn_Mul_14[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_9_attn_Mul_14[] = { "_vector_field_main_blocks_9_attn_out_fc_linear_Add_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_9_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_attn_Mul_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2794734239578247f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_attn_Mul_14", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_9_attn_Mul_14, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_attn_Mul_14, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_attn_Mul_14, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Residual connection of block 9: masked attention output + block input
 * (ElementWiseBinary, operation id 0; node name indicates Add). */
static ModelError_t addNode__vector_field_main_blocks_9_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_Add */
Qnn_Param_t params__vector_field_main_blocks_9_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_9_Add[] = { "_vector_field_main_blocks_9_Transpose_1_output_0", "_vector_field_main_blocks_9_Transpose_output_0" };
uint32_t dimensions__vector_field_main_blocks_9_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2760909795761108f, .offset= -129}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_9_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Static LayerNorm gamma (weight) tensor {512}, binary-blob backed. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_9_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_9_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_9_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_9_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011302456259727f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_9_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_9_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_9_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Static LayerNorm beta (bias) tensor {512}, binary-blob backed. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_9_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_9_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_9_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
.name= "tts_ttl_vector_field_main_blocks_9_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011574659729376f, .offset= -111}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_9_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_9_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_9_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Post-residual LayerNorm over the 512-feature axis (axes tensor = {2}, epsilon 1e-6),
 * consuming the residual sum plus the static gamma/beta tensors registered above. */
static ModelError_t addNode__vector_field_main_blocks_9_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_9_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_9_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_9_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_9_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_9_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__vector_field_main_blocks_9_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_9_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_9_norm_norm_weight", "tts_ttl_vector_field_main_blocks_9_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_9_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184952430427074f, .offset= -203}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_9_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_9_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Masks the normalized features with "latent_mask" (ElementWiseBinary,
 * operation id 13; node name indicates Multiply). Encoding is carried through. */
static ModelError_t
addNode__vector_field_main_blocks_9_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_9_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_9_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_9_Mul_1[] = { "_vector_field_main_blocks_9_norm_Transpose_1_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_9_Mul_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_9_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_9_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184952430427074f, .offset= -203}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_9_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_9_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_9_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_9_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_9_Mul_1, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* First op of block 10's ConvNeXt stage: re-applies "latent_mask" to block 9's
 * output before the depthwise conv (same Multiply pattern as above). */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_Mul */
Qnn_Param_t
params__vector_field_main_blocks_10_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_10_convnext_0_Mul[] = { "_vector_field_main_blocks_9_Mul_1_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_10_convnext_0_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184952430427074f, .offset= -203}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_10_convnext_0_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_10_convnext_0_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_10_convnext_0_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_10_convnext_0_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Explicit padding for the depthwise conv: pads the sequence axis 192 -> 196
 * (2 on each side); "scheme" scalar = 3 selects the pad mode defined by the
 * qti.aisw Pad op (constant-value padding presumed — confirm against QnnOpDef). */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Pad */
uint32_t
dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
  /* pad_amount is a {3,2} table of (before, after) pads per input dimension:
     dim0 (batch) 0/0, dim1 (sequence) 2/2, dim2 (channels) 0/0 -- grows the
     192-step sequence to 196 for the 5-tap depthwise conv downstream. */
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_dwconv_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Pad_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Pad_pad_amount,
                        .dataSize=24}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    /* NOTE(review): scheme = 3 is a QNN_OP_PAD_SCHEME_* enum value emitted by
       the converter (see QnnOpDef.h for which scheme 3 denotes). */
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad[] = {
    "_vector_field_main_blocks_10_convnext_0_Mul_output_0"
  };
  /* {1, 196, 512}: sequence 192 -> 196 after 2+2 padding; channels unchanged. */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0184952430427074f, .offset= -203}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Pad", // Node Name
                         "qti.aisw", // Package Name
                         "Pad", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_dwconv_Pad, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Transpose {0,2,1}: {1,196,512} (N,F,C) -> {1,512,196} (N,C,F). First step of
   the converter's lowering of the ONNX 1-D conv to a QNN DepthWiseConv2d. */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf_perm,
                        .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0184952430427074f, .offset= -203}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
static ModelError_t
addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d */
  /* Reshape {1,512,196} -> {1,512,1,196}: inserts a unit height axis so the
     1-D conv can be executed as a 2-D depthwise conv (1xW spatial window). */
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0184952430427074f, .offset= -203}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Transpose {0,2,3,1}: {1,512,1,196} (NCHW) -> {1,1,196,512} (NHWC), the
   spatial-first layout QNN's DepthWiseConv2d consumes. */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0184952430427074f, .offset= -203}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Static depthwise-conv weights, shape {1, 5, 1, 512} (H=1, W=5 taps,
   depth multiplier 1, 512 channels); payload lives in the generated binary
   blob, referenced via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight[] = {1, 5, 1, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0047217723913491f, .offset= -129}}},
                               .rank= 4,
                               .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight),
                                 .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
  return err;
}

/* Static per-channel bias vector ({512}) for the depthwise conv; quantized
   uint8 like the weights (converter run used bias_bitwidth=8). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0008063623099588f, .offset= -149}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= {
                                 .data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias),
                                 .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
  return err;
}

/* DepthWiseConv2d: dilation {1,1}, stride {1,1}, pad_amount all zero (the
   explicit Pad node already added 2+2 along W), so 196 - 5 + 1 = 192 output
   steps. Inputs: NHWC activations, static weights, static bias. */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_stride[] = {2};
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_dilation,
                        .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_pad_amount,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d_stride,
                        .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight",
    "tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0022905915975571f, .offset= -159}}},
        .rank= 4,
.dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "DepthWiseConv2d", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d, // Node Params
                         3, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Transpose {0,3,1,2}: {1,1,192,512} (NHWC) -> {1,512,1,192} (NCHW); undoes
   the spatial-first layout after the conv, first step of restoring the
   original ONNX 1-D layout. */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw_perm,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0022905915975571f, .offset= -159}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Reshape {1,512,1,192} -> {1,512,192}: drops the unit height axis, restoring
   the rank-3 1-D conv output "_..._dwconv_Conv_output_0". */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate */
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0022905915975571f, .offset= -159}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}
static ModelError_t
addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc */
  /* Transpose {0,2,1}: {1,512,192} (N,C,F) -> {1,192,512} (N,F,C), back to the
     features-last layout the rest of the block works in. */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc_perm,
                        .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0022905915975571f, .offset= -159}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* ElementWiseBinary (node name "Mul_1"): applies "latent_mask" to the conv
   output; the result tensor is named "_..._norm_Transpose_output_0", i.e. the
   converter folded the surrounding ONNX Transpose into this producer. */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_Mul_1 */
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_Mul_1[] = {
    "_vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_Mul_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_norm_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0022905915975571f, .offset= -159}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_Mul_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_Mul_1, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_Mul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_Mul_1, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Static LayerNorm gamma (scale) vector, shape {512}; payload in the generated
   binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0045520714484155f, .offset= 0}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight),
                                 .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
  return err;
}

/* Static LayerNorm beta (shift) vector, shape {512}; payload in the generated
   binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0014833961613476f, .offset= -100}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= {
                                 .data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias),
                                 .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
                           ), err);
  return err;
}

/* LayerNorm over axes = {2} (the 512-channel dim of {1,192,512}), epsilon 1e-6,
   with the gamma/beta tensors above. The converter folded the ONNX
   Transpose/LayerNorm/Transpose sandwich: input and output tensor names carry
   the original Transpose node names but stay in {1,192,512} layout. */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization_axes",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization_axes,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization_axes,
                        .dataSize=4}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization[] = {
    "_vector_field_main_blocks_10_convnext_0_norm_Transpose_output_0",
    "tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight",
    "tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= {
          QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0445230491459370f, .offset= -104}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization", // Node Name
                         "qti.aisw", // Package Name
                         "LayerNorm", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Transpose {0,2,1}: {1,192,512} -> {1,512,192}, feeding the normalized
   activations back to channels-first layout for the next stage.
   (Function continues past the end of this chunk.) */
static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf_perm,
                        .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    "_vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0445230491459370f, .offset= -104}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
                         1, // Num Node Params
inputs__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0445230491459370f, .offset= -104}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input 
Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0445230491459370f, .offset= -104}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0193188339471817f, .offset= -139}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020085731521249f, .offset= -229}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* 
ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= 
{.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0399573557078838f, .offset= -182}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0399573557078838f, .offset= -182}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, 
// Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0399573557078838f, .offset= -182}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_22(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_22 */ Qnn_Param_t params__elementwiseneuron_22[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_22[] = { "_vector_field_main_blocks_10_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_22[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120771508663893f, .offset= -14}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_22", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_22, // Node Params 1, // Num 
Node Params inputs__elementwiseneuron_22, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_22, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_10_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120771508663893f, .offset= -14}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d, // 
Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0120771508663893f, .offset= -14}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
/* [doc] Continuation of addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight: scale/offset quantization encoding, rank-4 dimensions, and raw static payload via BINVARSTART/BINLEN. */ {.scaleOffsetEncoding= {.scale= 0.0116075547412038f, .offset= -115}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* [doc] Registers the static uint8-quantized bias tensor ({512}) for blocks_10 convnext pwconv2 (scale 0.0010659…, offset -77); data comes from the model binary blob. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010659394320101f, .offset= -77}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* [doc] Adds the pwconv2 Conv2d node: dilation {1,1}, zero pad_amount, stride {1,1}, group 1 (a pointwise conv over the NHWC-reshaped input); inputs are the reshaped activation plus the weight/bias tensors above, output is {1,1,192,512} uint8. */ static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale=
0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t
outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250323265790939f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Transpose node with perm {0,3,1,2}: converts the conv output from {1,1,192,512} (NHWC) to {1,512,1,192} (NCHW); quantization params are carried through unchanged. */ static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250323265790939f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1,
// Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Reshape node: squeezes the NCHW tensor {1,512,1,192} down to rank 3 {1,512,192}. */ static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250323265790939f, .offset= -127}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Transpose node with perm {0,2,1}: flips {1,512,192} to feature-last {1,192,512} for the following elementwise ops. */ static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc[] = {
"_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250323265790939f, .offset= -127}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Registers the static per-channel gamma tensor {1,1,512} (uint8, scale 0.0016617…, offset 0) used by the Mul_2 node below. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_10_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"tts_ttl_vector_field_main_blocks_10_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016617102082819f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_10_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_10_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_10_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* [doc] ElementWiseBinary node, operation code 13 (presumably multiply per the converter's op-code convention — TODO confirm against QnnOpDef): gamma broadcast against the pwconv2 output, yielding {1,192,512}. */ static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_10_convnext_0_gamma", "_vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0072065624408424f, .offset= -67}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_10_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] ElementWiseBinary node, operation code 0 (presumably add — TODO confirm): combines _Mul_output_0 (residual branch, defined elsewhere) with _Mul_2_output_0; output {1,192,512}. */ static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_Add */ Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_Add[] = { "_vector_field_main_blocks_10_convnext_0_Mul_output_0", "_vector_field_main_blocks_10_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225767008960247f, .offset= -176}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_10_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] ElementWiseBinary (operation 13) masking the block_10 output with the graph input "latent_mask"; same quant params as its input. */ static ModelError_t addNode__vector_field_main_blocks_10_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_10_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_10_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_10_convnext_0_Mul_3[] = { "_vector_field_main_blocks_10_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_10_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_10_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_10_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225767008960247f, .offset= -176}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_10_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_10_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_10_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_10_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_10_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] First node of blocks_11: masks again with "latent_mask" (operation 13); output tensor is named "_vector_field_main_blocks_11_Transpose_output_0" {1,192,512}. */ static ModelError_t addNode__vector_field_main_blocks_11_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_Mul */ Qnn_Param_t params__vector_field_main_blocks_11_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_11_Mul[] = { "_vector_field_main_blocks_10_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_11_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225767008960247f, .offset= -176}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_11_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_11_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Reshape dropping the batch dim: {1,192,512} -> {192,512}, preparing the FullyConnected (query projection) input. */ static ModelError_t addNode__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape[] = { "_vector_field_main_blocks_11_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225767008960247f, .offset= -176}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Registers the static uint8-quantized weight {256,512} (ONNX initializer onnx::MatMul_3161) consumed by the query FullyConnected. */ static ModelError_t addTensor_onnx__MatMul_3161(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3161[] = {256, 512}; VALIDATE(model.addTensor("onnx__MatMul_3161", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3161", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0131983105093241f, .offset= -114}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3161, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3161), .dataSize=BINLEN(onnx__MatMul_3161)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* [doc] Registers the static uint8-quantized bias {256} for the blocks_11 attention query linear. */ static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0049904482439160f, .offset= -134}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } /* [doc] FullyConnected node implementing the ONNX MatMul+Add of the query linear: {192,512} x weight {256,512} + bias {256} -> {192,256}. */ static ModelError_t addNode__vector_field_main_blocks_11_attention_W_query_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_W_query_linear_MatMul */ const char* inputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul[] = { "_vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape", "onnx__MatMul_3161", "tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_11_attention_W_query_linear_Add_output_0_fc[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481216795742512f, .offset= -109}}}, .rank= 2,
.dimensions=dimensions__vector_field_main_blocks_11_attention_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_W_query_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Reshape restoring the batch dim after FullyConnected: {192,256} -> {1,192,256}. */ static ModelError_t addNode__vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_11_attention_W_query_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_11_attention_W_query_linear_Add_output_0[] = {1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481216795742512f, .offset= -109}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_attention_W_query_linear_Add_output_0, .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Split node: splits {1,192,256} along axis 2 at index 128 into two {1,192,128} tensors (outputs _Split_output_0 and _Split_output_1). */ static ModelError_t addNode__vector_field_main_blocks_11_attention_Split(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Split */ uint32_t dimensions__vector_field_main_blocks_11_attention_Split_split_index[] = {1}; uint32_t _vector_field_main_blocks_11_attention_Split_split_index[] = {128}; Qnn_Param_t params__vector_field_main_blocks_11_attention_Split[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_11_attention_Split_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements=
0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_11_attention_Split[] = { "_vector_field_main_blocks_11_attention_W_query_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Split_output_0[] = {1, 192, 128}; uint32_t dimensions__vector_field_main_blocks_11_attention_Split_output_1[] = {1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Split[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481216795742512f, .offset= -109}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481216795742512f, .offset= -109}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Split", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_11_attention_Split, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_11_attention_Split, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Split, // Output Tensors 2// Num Output Tensors ), err); return err; } /* [doc] Reshape implementing ONNX Unsqueeze on Split output 0: {1,192,128} -> {1,1,192,128}. */ static ModelError_t addNode__vector_field_main_blocks_11_attention_Unsqueeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Unsqueeze */ const char* inputs__vector_field_main_blocks_11_attention_Unsqueeze[] = { "_vector_field_main_blocks_11_attention_Split_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Unsqueeze_output_0[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Unsqueeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481216795742512f, .offset= -109}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Unsqueeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node
Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_Unsqueeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Unsqueeze, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Reshape implementing ONNX Unsqueeze on Split output 1: {1,192,128} -> {1,1,192,128}. */ static ModelError_t addNode__vector_field_main_blocks_11_attention_Unsqueeze_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Unsqueeze_1 */ const char* inputs__vector_field_main_blocks_11_attention_Unsqueeze_1[] = { "_vector_field_main_blocks_11_attention_Split_output_1" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Unsqueeze_1_output_0[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Unsqueeze_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481216795742512f, .offset= -109}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Unsqueeze_1", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_Unsqueeze_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Unsqueeze_1, // Output Tensors 1// Num Output Tensors ), err); return err; } /* [doc] Concat (axis 0) stacking the two unsqueezed halves into {2,1,192,128}. */ static
ModelError_t addNode__vector_field_main_blocks_11_attention_Concat(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Concat */ Qnn_Param_t params__vector_field_main_blocks_11_attention_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_11_attention_Concat[] = { "_vector_field_main_blocks_11_attention_Unsqueeze_output_0", "_vector_field_main_blocks_11_attention_Unsqueeze_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Concat_output_0[] = {2, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481216795742512f, .offset= -109}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_11_attention_Concat, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_11_attention_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Concat, // Output Tensors 1// Num Output Tensors ), err); return err; } static
ModelError_t addTensor__vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw[] = {2, 1, 128, 50}; VALIDATE(model.addTensor("_vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078411987051368f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw), .dataSize=BINLEN(_vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_MatMul */ Qnn_Param_t params__vector_field_main_blocks_11_attention_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_11_attention_MatMul[] = { "_vector_field_main_blocks_11_attention_Concat_output_0", "_vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw" }; uint32_t 
dimensions__vector_field_main_blocks_11_attention_MatMul_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 1.0278865098953247f, .offset= -112}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_11_attention_MatMul, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_11_attention_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_Div(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Div */ Qnn_Param_t params__vector_field_main_blocks_11_attention_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_11_attention_Div[] = { "_vector_field_main_blocks_11_attention_MatMul_output_0", "_vector_field_main_blocks_3_attn_Constant_39_output_0" }; uint32_t 
dimensions__vector_field_main_blocks_11_attention_Div_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0642429068684578f, .offset= -112}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_11_attention_Div, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_11_attention_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Softmax */ Qnn_Param_t params__vector_field_main_blocks_11_attention_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__vector_field_main_blocks_11_attention_Softmax[] = { 
"_vector_field_main_blocks_11_attention_Div_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Softmax_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__vector_field_main_blocks_11_attention_Softmax, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_11_attention_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Where */ const char* inputs__vector_field_main_blocks_11_attention_Where[] = { "_vector_field_main_blocks_23_attention_Cast_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0", "_vector_field_main_blocks_11_attention_Softmax_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Where_output_0[] = {2, 1, 192, 50}; 
Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036746696569026f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_MatMul_1 */ Qnn_Param_t params__vector_field_main_blocks_11_attention_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_11_attention_MatMul_1[] = { "_vector_field_main_blocks_11_attention_Where_output_0", "_vector_field_main_blocks_11_attention_Concat_2_output_0" }; uint32_t 
dimensions__vector_field_main_blocks_11_attention_MatMul_1_output_0[] = {2, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018725182162598f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_11_attention_MatMul_1, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_11_attention_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Split_3 */ uint32_t dimensions__vector_field_main_blocks_11_attention_Split_3_split_index[] = {1}; uint32_t _vector_field_main_blocks_11_attention_Split_3_split_index[] = {1}; Qnn_Param_t params__vector_field_main_blocks_11_attention_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_11_attention_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_11_attention_Split_3_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_11_attention_Split_3[] = { "_vector_field_main_blocks_11_attention_MatMul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Split_3_output_0[] = {1, 1, 192, 128}; uint32_t dimensions__vector_field_main_blocks_11_attention_Split_3_output_1[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018725182162598f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018725182162598f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Split_3", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_11_attention_Split_3, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_11_attention_Split_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Split_3, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_Concat_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Concat_3 */ Qnn_Param_t params__vector_field_main_blocks_11_attention_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_11_attention_Concat_3[] = { "_vector_field_main_blocks_11_attention_Split_3_output_0", "_vector_field_main_blocks_11_attention_Split_3_output_1" }; uint32_t dimensions__vector_field_main_blocks_11_attention_Concat_3_output_0[] = {1, 1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Concat_3[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018725182162598f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_11_attention_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Concat_3", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_11_attention_Concat_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_11_attention_Concat_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Concat_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_Squeeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Squeeze */ const char* inputs__vector_field_main_blocks_11_attention_Squeeze[] = { "_vector_field_main_blocks_11_attention_Concat_3_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_pre_reshape[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018725182162598f, .offset= -124}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3164(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3164[] = {512, 256}; VALIDATE(model.addTensor("onnx__MatMul_3164", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3164", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0065189413726330f, .offset= -141}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3164, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3164), .dataSize=BINLEN(onnx__MatMul_3164)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
addTensor_tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010570901213214f, .offset= -56}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_out_fc_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_out_fc_linear_MatMul */ const char* inputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul[] = { "_vector_field_main_blocks_11_attention_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3164", "tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0_fc[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073833800852299f, .offset= -59}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_out_fc_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073833800852299f, .offset= -59}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_attention_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_attention_Mul */ Qnn_Param_t params__vector_field_main_blocks_11_attention_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_11_attention_Mul[] = { "_vector_field_main_blocks_11_attention_out_fc_linear_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_11_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_attention_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073833800852299f, .offset= -59}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_attention_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_11_attention_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_11_attention_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_attention_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_Add */ Qnn_Param_t params__vector_field_main_blocks_11_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_11_Add[] = { "_vector_field_main_blocks_11_Transpose_1_output_0", "_vector_field_main_blocks_11_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_11_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0272946022450924f, .offset= -152}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_11_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_11_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_11_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_11_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_11_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_11_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011638227151707f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_11_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_11_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_11_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* STATIC bias tensor for block 11's norm (LayerNorm input #3): rank-1 {512}, u8
 * scale/offset-quantized; raw bytes linked in via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_11_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_11_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_11_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_11_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006646314286627f, .offset= -144}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_11_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_11_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_11_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* LayerNorm over the last axis (axes = {2}) of the {1,192,512} input, epsilon = 1e-6,
 * using the static weight/bias tensors registered above. Output is u8-quantized with
 * scale 0.0181347..., offset -191 (shared by the masking Muls that follow). */
static ModelError_t addNode__vector_field_main_blocks_11_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_11_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_11_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_11_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_11_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_11_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_11_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__vector_field_main_blocks_11_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_11_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_11_norm_norm_weight", "tts_ttl_vector_field_main_blocks_11_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_11_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_11_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181347224861383f, .offset= -191}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_11_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_11_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_11_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_11_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_11_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_11_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_11_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_11_Mul_1[] = { "_vector_field_main_blocks_11_norm_Transpose_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_11_Mul_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_11_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_11_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181347224861383f, .offset= -191}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_11_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_11_Mul_1", // Node Name "qti.aisw", // Package Name 
"ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_11_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_11_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_11_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_Mul */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_Mul[] = { "_vector_field_main_blocks_11_Mul_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181347224861383f, .offset= -191}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_Mul, // Node Params 1, 
// Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Pad node feeding block 12's depthwise conv: pad_amount {{0,0},{2,2},{0,0}} pads the
 * middle (length-192) axis by 2 on each side -> {1, 196, 512}. scheme=3 -- see the Pad
 * scheme enum in QnnOpDef.h for its meaning. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Pad */
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_12_convnext_0_Mul_output_0" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181347224861383f, .offset= -191}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose {1,196,512} -> {1,512,196} (perm {0,2,1}): channel-first layout ahead of the
 * 2d-reshape + depthwise conv below. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181347224861383f, .offset= -191}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf", // Node Name 
"qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181347224861383f, .offset= -191}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node 
Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t 
dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181347224861383f, .offset= -191}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* STATIC depthwise-conv filter: {1, 5, 1, 512} (5-tap kernel over 512 channels -- axis
 * meaning per QNN DepthWiseConv2d weight layout), u8 scale/offset-quantized; payload
 * linked in via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0082693500444293f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* STATIC per-channel bias for the same depthwise conv: rank-1 {512}, u8
 * scale/offset-quantized, bytes from the linked binary blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006068511283956f, .offset= -158}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}} ), err);
return err;
}

/* DepthWiseConv2d node parameters: dilation {1,1}, pad_amount {{0,0},{0,0}} (padding was
 * already applied by the explicit Pad node upstream), stride {1,1}. Inputs are the NHWC
 * activation plus the static weight/bias tensors registered above. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight", "tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027987780049443f, .offset= -160}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose the conv result {1,1,192,512} -> {1,512,1,192} (perm {0,3,1,2}): restores
 * channel-first layout before the rank-3 Reshape that follows. Quantization carried
 * through unchanged (scale 0.0027987..., offset -160). */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027987780049443f, .offset= -160}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
// (continuation) Final arguments of the addNode() call begun above for the Transpose node
// "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw".
"_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Reshape node "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate":
// collapses the NCHW-transposed depthwise-conv intermediate back to the 3-D tensor
// "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0" of shape {1, 512, 192},
// carrying the same uint8 scale/offset encoding (scale 0.0027987780049443, offset -160).
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate */
// Single input: the transposed conv intermediate produced by the node added above.
const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027987780049443f, .offset= -160}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Transpose node "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc":
// perm {0, 2, 1} swaps the channel and feature axes, {1, 512, 192} -> {1, 192, 512}.
// The quantization encoding of the input is passed through unchanged.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc */
// Static 1-D "perm" parameter tensor: 3 uint32 entries (dataSize = 3 * 4 = 12 bytes).
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027987780049443f, .offset= -160}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the ElementWiseBinary node "_vector_field_main_blocks_12_convnext_0_Mul_1":
// combines the transposed conv output with graph input "latent_mask".
// "operation" = 13 selects the binary op from the QNN_OP_ELEMENT_WISE_BINARY_OPERATION_*
// enum in QnnOpDef.h; the ONNX node name "Mul_1" suggests multiply — confirm against the enum.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_Mul_1[] = { "_vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" };
// Output keeps the first input's shape {1, 192, 512} and quantization encoding.
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027987780049443f, .offset= -160}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static LayerNorm gamma (scale) weight tensor, 512 uint8 values whose
// payload lives in the companion binary blob (BINVARSTART/BINLEN). Continues below.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
// (continuation) Remainder of the static LayerNorm weight tensor started above:
// per-tensor uint8 encoding scale 0.0031817646231502, offset 0; data from the binary blob.
"tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0031817646231502f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Registers the static LayerNorm beta (bias) tensor: 512 uint8 values,
// scale 0.0020508121233433, offset -85; payload from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020508121233433f, .offset= -85}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Adds the LayerNorm node "_vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization":
// normalizes over axis 2 (the 512-wide feature axis) with epsilon 1e-6, then applies the
// gamma/beta tensors registered above. Output {1, 192, 512}, re-quantized to
// scale 0.0279297027736902, offset -98.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization */
// Static 1-D "axes" parameter tensor: one uint32 entry (dataSize = 4 bytes), value {2}.
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
// Inputs: data, gamma (weight), beta (bias) — in LayerNorm's expected order.
const char* inputs__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_12_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight", "tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279297027736902f, .offset= -98}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Transpose node "_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf":
// perm {0, 2, 1} moves the LayerNorm output back to channel-first layout,
// {1, 192, 512} -> {1, 512, 192}. Function continues past this chunk boundary.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf */
// Static 1-D "perm" parameter tensor: 3 uint32 entries (dataSize = 12 bytes).
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279297027736902f, .offset= -98}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
// (continuation) Closes the output-tensor initializer of the "_..._norm_Transpose_1_output_0_ncf"
// Transpose node begun above, then registers the node itself.
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Reshape node "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d":
// inserts a unit height axis, {1, 512, 192} -> {1, 512, 1, 192}, so the 1-D pointwise conv
// can be lowered to a Conv2d. Quantization encoding is carried through unchanged.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279297027736902f, .offset= -98}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Transpose node "_..._pwconv1_Conv_reshape_to_2d_nhwc": perm {0, 2, 3, 1}
// converts NCHW {1, 512, 1, 192} to the NHWC layout {1, 1, 192, 512} expected by Conv2d.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
// Static 1-D "perm" parameter tensor: 4 uint32 entries (dataSize = 4 * 4 = 16 bytes).
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0279297027736902f, .offset= -98}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static pwconv1 weight tensor: HWIO layout {1, 1, 512, 1024} uint8,
// scale 0.0137027101591229, offset -133; payload from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0137027101591229f, .offset= -133}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Registers the static pwconv1 bias tensor: 1024 uint8 values,
// scale 0.0025821530725807, offset -223. Function continues past this chunk boundary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025821530725807f, .offset= -223}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
// (continuation) Remainder of the static pwconv1 bias tensor started above:
// payload and length come from the companion binary blob.
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Adds the Conv2d node "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d":
// a 1x1 (pointwise) convolution — dilation {1,1}, zero padding, stride {1,1}, group 1 —
// taking the NHWC-reshaped input {1, 1, 192, 512} plus the static weight/bias tensors,
// producing {1, 1, 192, 1024} quantized to scale 0.0373477935791016, offset -189.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d */
// Static parameter payloads: dilation/stride are 2 uint32 (8 bytes) each;
// pad_amount is a 2x2 uint32 tensor (16 bytes) of all zeros.
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
// Inputs: activation, weight, bias — Conv2d's expected order.
const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0373477935791016f, .offset= -189}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Transpose node "_..._pwconv1_Conv_intermediate_nchw": perm {0, 3, 1, 2}
// converts the Conv2d output from NHWC {1, 1, 192, 1024} back to NCHW {1, 1024, 1, 192}.
// Function continues past this chunk boundary.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw */
// Static 1-D "perm" parameter tensor: 4 uint32 entries (dataSize = 16 bytes).
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
// (continuation) Closes the output-tensor initializer of the "_..._pwconv1_Conv_intermediate_nchw"
// Transpose node begun above (same encoding as the Conv2d output), then registers the node.
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0373477935791016f, .offset= -189}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Reshape node "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate":
// drops the unit height axis, {1, 1024, 1, 192} -> {1, 1024, 192}, yielding
// "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_output_0" with the same encoding.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0373477935791016f, .offset= -189}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the ElementWiseNeuron node "_elementwiseneuron_24" on the pwconv1 output.
// "operation" = 1 selects the neuron function from the QNN_OP_ELEMENT_WISE_NEURON_OPERATION_*
// enum in QnnOpDef.h; the output tensor name ("..._act_Mul_1_output_0") indicates this node
// was fused from the block's activation — confirm the exact function against the enum.
static ModelError_t addNode__elementwiseneuron_24(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_24 */
Qnn_Param_t params__elementwiseneuron_24[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_24[] = { "_vector_field_main_blocks_12_convnext_0_pwconv1_Conv_output_0" };
// Output keeps shape {1, 1024, 192}; re-quantized to scale 0.0103288367390633, offset -16.
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_24[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103288367390633f, .offset= -16}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_24", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_24, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_24, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_24, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the Reshape node "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d":
// inserts a unit height axis, {1, 1024, 192} -> {1, 1024, 1, 192}, preparing the activation
// output for the pwconv2 Conv2d lowering (mirrors the pwconv1 pattern above).
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_0_act_Mul_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103288367390633f, .offset= -16}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Begins the Transpose node "_..._pwconv2_Conv_reshape_to_2d_nhwc" (perm {0, 2, 3, 1},
// NCHW -> NHWC, mirroring the pwconv1 path); its definition continues past this chunk.
static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103288367390633f, .offset= -16}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0082772038877010f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008771696011536f, .offset= -123}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, 
.name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133939506486058f, .offset= -110}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ 
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133939506486058f, .offset= -110}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133939506486058f, .offset= -110}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133939506486058f, .offset= -110}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007938269409351f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_12_convnext_0_gamma", "_vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_Mul_2_output_0[] = {1, 
192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024870922788978f, .offset= -102}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_Add */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_Add[] = { "_vector_field_main_blocks_12_convnext_0_Mul_output_0", "_vector_field_main_blocks_12_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_Add_output_0[] = {1, 192, 512}; 
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186752937734127f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_0_Mul_3[] = { "_vector_field_main_blocks_12_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_12_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186752937734127f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_Mul */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_1_Mul[] = { "_vector_field_main_blocks_12_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_1_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_Mul[] = { 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186752937734127f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_1_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_1_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_1_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_1_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 4, 4, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
/* (continued) tail of the pad_amount param tensor for the Pad node — the declaration begins on an earlier line. */
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
/* NOTE(review): Pad consumes the preceding Mul output and grows the last axis 192 -> 200
 * (output {1, 200, 512}); pad amounts come from the static pad_amount tensor above,
 * scheme scalar = 3 (numeric meaning defined by the QNN Pad op — confirm against QnnOpDef). */
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad[] = {
"_vector_field_main_blocks_12_convnext_1_Mul_output_0"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0[] = {1, 200, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186752937734127f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): auto-generated converter output. Transposes the padded activation
 * {1, 200, 512} -> channel-first {1, 512, 200} (perm {0, 2, 1}) ahead of the
 * depthwise-conv reshape below. Quant params (scale/offset) are carried over
 * unchanged from the Pad output. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 512, 200};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186752937734127f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): reshapes {1, 512, 200} to rank-4 {1, 512, 1, 200} so the 1-D depthwise
 * convolution can be executed as DepthWiseConv2d with a unit height dimension. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 200};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186752937734127f, .offset= -193}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): transposes the rank-4 activation {1, 512, 1, 200} -> NHWC {1, 1, 200, 512}
 * (perm {0, 2, 3, 1}), the layout the DepthWiseConv2d node below expects. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 200, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186752937734127f, .offset= -193}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): static uint8-quantized depthwise filter {1, 5, 1, 512} — a width-5 kernel
 * per each of the 512 channels; payload is embedded via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0073386458680034f, .offset= -113}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* NOTE(review): static uint8-quantized depthwise bias vector {512}; payload embedded
 * via BINVARSTART/BINLEN (declaration continues on the following lines). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias[]
= {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008637058781460f, .offset= -145}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* NOTE(review): the depthwise conv itself. Width-5 kernel with dilation {1, 2}
 * (effective extent 9), stride {1, 1}, zero pad_amount — so the padded width
 * 200 shrinks to 192 in the output {1, 1, 192, 512}. Padding was applied
 * explicitly by the earlier Pad node. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_dilation[] = {1, 2};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight",
"tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038935237098485f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): undoes the NHWC layout — transposes the conv result {1, 1, 192, 512}
 * back to NCHW {1, 512, 1, 192} (perm {0, 3, 1, 2}); quant params unchanged. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038935237098485f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): drops the unit height axis — reshape {1, 512, 1, 192} back to the
 * rank-3 1-D layout {1, 512, 192}, completing the 1-D-conv-as-2-D round trip. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate */
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038935237098485f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): transposes {1, 512, 192} -> feature-last {1, 192, 512} (perm {0, 2, 1})
 * so the result can be masked and layer-normalized over the channel axis. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038935237098485f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): masks the conv output with graph input "latent_mask" (broadcast over the
 * 512-channel axis). ElementWiseBinary operation enum = 13 — presumably multiply, given
 * the original ONNX node name "Mul_1"; confirm against the QNN op enum. Output quant
 * params equal the input's (scale 0.0038935…, offset -150), consistent with a 0/1 mask. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_Mul_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_Mul_1[] = {
"_vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_Mul_1[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038935237098485f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): static uint8-quantized LayerNorm gamma vector {512}; offset 0 here
 * (zero-point at 0), payload embedded via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032232932280749f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* NOTE(review): static uint8-quantized LayerNorm beta vector {512}; payload embedded
 * via BINVARSTART/BINLEN (declaration continues on the following lines). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022334107197821f, .offset= -91}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* NOTE(review): LayerNorm over the last axis (axes = {2}, i.e. the 512-channel axis of
 * {1, 192, 512}), epsilon 1e-6, with the gamma/beta tensors registered above.
 * Output is requantized to scale 0.0342730…, offset -68. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization[] = {
"_vector_field_main_blocks_12_convnext_1_norm_Transpose_output_0",
"tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight",
"tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias"
};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0342730171978474f, .offset= -68}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* NOTE(review): transposes the normalized activation {1, 192, 512} -> channel-first
 * {1, 512, 192} (perm {0, 2, 1}) ahead of the pwconv1 reshape below. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf[] = {
"_vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0342730171978474f, .offset= -68}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d[] = { 
"_vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
/* Reshape is layout-only: output keeps the input's u8 scale/offset. */
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0342730171978474f, .offset= -68}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Adds a Transpose node, perm {0, 2, 3, 1}: {1, 512, 1, 192} -> {1, 1, 192, 512}
   (channel-last layout for the Conv2d that consumes it). */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
/* "perm": static rank-1 uint32 tensor of 4 elements (dataSize = 4 * 4 bytes). */
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0342730171978474f, .offset= -68}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Registers the static u8 pwconv1 weight tensor {1, 1, 512, 1024}; data comes
   from the compiled-in binary blob via BINVARSTART/BINLEN. The two leading 1s
   indicate a 1x1 kernel (presumably HWIO layout — confirm against QNN Conv2d opdef). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0104251131415367f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced=
_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1};
/* Conv2d params: dilation {1,1}, pad_amount {{0,0},{0,0}}, stride {1,1} as
   static uint32 tensors (dataSize = element count * 4), plus scalar group=1
   and reuse_sparse_indices=false. */
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
/* Conv2d inputs: activation (channel-last), static weight, static bias. */
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight", "tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_bias" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
/* u8 output {1, 1, 192, 1024} (512 -> 1024 channel expansion of the pointwise conv). */
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0514443255960941f, .offset= -156}}}, .rank= 4,
.dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Adds a Transpose node, perm {0, 3, 1, 2}: {1, 1, 192, 1024} -> {1, 1024, 1, 192}
   (back to channel-first after the Conv2d). */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0514443255960941f, .offset= -156}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Adds a Reshape node: {1, 1024, 1, 192} -> {1, 1024, 192}; drops the dummy
   spatial axis, restoring the original ONNX Conv output name/shape. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0514443255960941f, .offset= -156}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Adds an ElementWiseNeuron node with scalar operation=1; its output carries the
   original ONNX act Mul_1 tensor name, so this is the converter's fused form of the
   block's activation (NOTE(review): exact meaning of operation id 1 is defined by the
   QNN ElementWiseNeuron opdef — confirm there). Shape preserved: {1, 1024, 192}. */
static ModelError_t addNode__elementwiseneuron_26(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_26 */
Qnn_Param_t params__elementwiseneuron_26[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_26[] = { "_vector_field_main_blocks_12_convnext_1_pwconv1_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_26[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0205427687615156f, .offset= -8}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_26", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_26, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_26, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_26, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Adds a Reshape node: {1, 1024, 192} -> {1, 1024, 1, 192}; same 1D->2D lifting
   as pwconv1, now feeding the second pointwise conv. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR
_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_1_act_Mul_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
/* Reshape is layout-only: output keeps the activation's u8 scale/offset. */
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0205427687615156f, .offset= -8}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Adds a Transpose node, perm {0, 2, 3, 1}: {1, 1024, 1, 192} -> {1, 1, 192, 1024}
   (channel-last layout for the pwconv2 Conv2d). */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t
dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0205427687615156f, .offset= -8}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Registers the static u8 pwconv2 weight tensor {1, 1, 1024, 512} (1x1 kernel,
   1024 -> 512 channel projection); data from the compiled-in blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0114115364849567f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Registers the static u8 pwconv2 bias tensor {512} from the compiled-in blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009695755434223f, .offset= -119}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Adds the pwconv2 Conv2d node: same pattern as pwconv1 (1x1 kernel, stride {1,1},
   dilation {1,1}, zero padding, group=1), mapping {1, 1, 192, 1024} -> {1, 1, 192, 512}. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t
_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1};
/* Conv2d params: dilation {1,1}, pad_amount {{0,0},{0,0}}, stride {1,1} as static
   uint32 tensors, plus scalar group=1 and reuse_sparse_indices=false. */
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
/* Conv2d inputs: activation (channel-last), static weight, static bias. */
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight", "tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0201173648238182f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Adds a Transpose node, perm {0, 3, 1, 2}: {1, 1, 192, 512} -> {1, 512, 1, 192}
   (back to channel-first after pwconv2). Definition continues past this chunk. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding=
{.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0201173648238182f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0201173648238182f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
/* [review] Static tensor "tts_ttl_vector_field_main_blocks_12_convnext_1_gamma" (1x1x512 uint8, scale
 * 0.0009302275, offset 0, payload via BINVARSTART/BINLEN) followed by the ConvNeXt block-1 epilogue:
 *   Mul_2: ElementWiseBinary operation=13 (multiply, per the converter's ElementWiseBinary operation enum --
 *          verify against QnnOpDef.h) of gamma with the pwconv2 output (layer-scale);
 *   Add:   operation=0 (add) of "_..._convnext_1_Mul_output_0" (residual branch, produced elsewhere in this
 *          file) with the scaled branch;
 *   Mul_3: multiplies the sum by graph input "latent_mask";
 *   convnext_2 Mul: multiplies by "latent_mask" AGAIN immediately afterwards.
 * NOTE(review): the back-to-back latent_mask multiplies look redundant (idempotent if the mask is 0/1); this
 * appears to be an artifact of per-block mask application in the source model rather than a bug -- all three
 * mask-related outputs reuse the producer's encoding (scale 0.0188874658, offset -193), as expected for
 * mask-like ops. Do not hand-edit: regenerate from the ONNX model instead. */
"_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0201173648238182f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_1_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009302274556831f, .offset= 0}}}, .rank= 3, 
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_1_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_1_Mul_2[] = { "tts_ttl_vector_field_main_blocks_12_convnext_1_gamma", "_vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_1_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036431928165257f, .offset= -135}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_vector_field_main_blocks_12_convnext_1_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_1_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_1_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_1_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_Add */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_1_Add[] = { "_vector_field_main_blocks_12_convnext_1_Mul_output_0", "_vector_field_main_blocks_12_convnext_1_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_1_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188874658197165f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_12_convnext_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_1_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_1_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_1_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_1_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_1_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_1_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_1_Mul_3[] = { "_vector_field_main_blocks_12_convnext_1_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_1_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_1_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_1_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188874658197165f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_1_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_1_Mul_3", // Node 
Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_1_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_1_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_1_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_Mul */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_Mul[] = { "_vector_field_main_blocks_12_convnext_1_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188874658197165f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node 
Type params__vector_field_main_blocks_12_convnext_2_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 8, 8, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad[] = { "_vector_field_main_blocks_12_convnext_2_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0[] = {1, 
/* [review] Continuation of the dwconv Pad output dims (1 x 208 x 512 -- sequence padded 192 -> 208, i.e. 8 per
 * side, which matches the receptive-field half-width of the downstream kernel-5/dilation-4 depthwise conv:
 * (5-1)*4/2 = 8). The Pad node uses pad_amount {{0,0},{8,8},{0,0}} with scalar scheme=3 (converter pad-scheme
 * enum value; presumably constant/zero padding -- verify against QnnOpDef.h). It is followed by the standard
 * conv-lowering chain for the convnext_2 depthwise conv:
 *   Transpose perm {0,2,1}   (feature-last -> channel-first, 1x512x208) ->
 *   Reshape   (insert unit H: 1x512x1x208)                              ->
 *   Transpose perm {0,2,3,1} (NCHW -> NHWC, dims emitted past this span).
 * Quantization encoding is propagated unchanged through these layout-only ops. Generated code -- do not
 * hand-edit; regenerate from the ONNX model instead. */
208, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188874658197165f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 512, 208}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188874658197165f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf", // Node Name 
"qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 208}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188874658197165f, .offset= -193}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node 
Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d" }; uint32_t 
/* [review] Output dims for the reshape_to_2d_nhwc Transpose (1x1x208x512), then the convnext_2 depthwise conv
 * operands and node: weight "tts_ttl_..._dwconv_weight" 1x5x1x512 (uint8, scale 0.0060494351, offset -126;
 * presumably HWIO-style layout with kernel width 5 over 512 channels -- verify against the QNN
 * DepthWiseConv2d weight-layout spec), bias [512] (quantized uint8; bias_bitwidth=8 per the conversion
 * command line in the file header), and the "DepthWiseConv2d" node itself: dilation {1,4}, pad {0,0,0,0},
 * stride {1,1}, 3 tensor params; output 1x1x192x512, which is consistent with the earlier Pad
 * (208 - (5-1)*4 = 192). The trailing lines begin the NHWC->NCHW Transpose for this conv's output; that
 * function continues past this chunk and is preserved verbatim. Generated code -- do not hand-edit. */
dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 208, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188874658197165f, .offset= -193}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060494351200759f, .offset= -126}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004569482116494f, .offset= -207}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, 
.isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_dilation[] = {1, 4}; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight", "tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036449111066759f, .offset= -196}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036449111066759f, .offset= -196}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036449111066759f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate", 
// Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0" }; uint32_t 
dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036449111066759f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_Mul_1[] = { 
"_vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036449111066759f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032543642446399f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021181579213589f, .offset= -216}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 
0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_12_convnext_2_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight", "tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias" }; uint32_t 
dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0386857315897942f, .offset= -210}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t 
params__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0386857315897942f, .offset= -210}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0386857315897942f, .offset= -210}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0386857315897942f, .offset= -210}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t 
dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0130681693553925f, .offset= -140}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025028337258846f, .offset= -229}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
// --- (continuation) static bias tensor for pwconv1: payload comes from the linked binary blob via BINVARSTART/BINLEN. ---
// Then: 1x1 pointwise Conv2d node "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d" (dilation {1,1}, pad {0,0,0,0}, stride {1,1}).
// NOTE: auto-generated by qnn-onnx-converter — do not hand-edit; regenerate from the ONNX model instead.
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
// pad_amount ({{0,0},{0,0}}, dataSize 16 = 4 x uint32) and stride ({1,1}, dataSize 8 = 2 x uint32) tensor params follow.
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, 
// Conv2d node registration: inputs = (NHWC activation, weight, bias), output "_..._Conv_intermediate" is uint8
// quantized (scale/offset per tensor), shape {1,1,192,1024}. Then the NHWC->NCHW Transpose node definition begins.
.name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight", "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0532378703355789f, .offset= -197}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){ 
// Transpose node: permutes the pwconv1 Conv2d output with perm {0,3,1,2} (NHWC {1,1,192,1024} -> NCHW {1,1024,1,192}).
// perm is a static uint32 tensor param (4 elements, dataSize 16). Quantization encoding carries through unchanged.
// NOTE: auto-generated code — regenerate from the ONNX model rather than editing by hand.
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
// Transpose registration, then a Reshape node that drops the singleton height axis: {1,1024,1,192} -> {1,1024,192}.
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0532378703355789f, .offset= -197}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
// Reshape registration (no params), then the ElementWiseNeuron node (.uint32Value operation = 1; presumably the
// converter's encoding of the ConvNeXt activation here — confirm against QnnOpDef.h before relying on it).
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0532378703355789f, .offset= -197}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_28(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_28 */ Qnn_Param_t params__elementwiseneuron_28[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_28[] = { "_vector_field_main_blocks_12_convnext_2_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_28[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0127566708251834f, .offset= -13}}}, .rank= 3, 
// ElementWiseNeuron registration, then a Reshape that re-inserts the singleton spatial axis for the next 1x1 conv:
// {1,1024,192} -> {1,1024,1,192}.
.dimensions=dimensions__vector_field_main_blocks_12_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_28", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_28, // Node Params 1, // Num Node Params inputs__elementwiseneuron_28, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_28, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_2_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0127566708251834f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
// Reshape registration completes, then a Transpose with perm {0,2,3,1} converts NCHW {1,1024,1,192} to
// NHWC {1,1,192,1024} so the following pwconv2 Conv2d can consume it.
// NOTE: auto-generated code — regenerate from the ONNX model rather than editing by hand.
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { 
// Transpose registration, then the static weight tensor for pwconv2 (1x1 kernel, 1024 in -> 512 out, HWIO
// {1,1,1024,512}, uint8 with per-tensor scale/offset; payload from the binary blob).
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0127566708251834f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight(QnnModel& model){ ModelError_t err = 
// Weight tensor registration, then the matching static bias tensor ({512}, uint8 scale/offset quantized).
MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0154245067387819f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012763265985996f, .offset= -90}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, 
// pwconv2 Conv2d params continue: pad_amount {{0,0},{0,0}} (4 x uint32, dataSize 16) and stride {1,1}
// (2 x uint32, dataSize 8) — a pure 1x1 pointwise convolution, same pattern as pwconv1 above.
// NOTE: auto-generated code — regenerate from the ONNX model rather than editing by hand.
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, 
// Conv2d registration: inputs = (NHWC activation, pwconv2 weight, pwconv2 bias); output {1,1,192,512}, uint8.
.name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight", "tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199142228811979f, .offset= -151}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){ 
// Transpose node: perm {0,3,1,2} converts the pwconv2 output NHWC {1,1,192,512} -> NCHW {1,512,1,192}.
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
// Transpose registration completes, then a Reshape drops the singleton axis ({1,512,1,192} -> {1,512,192}),
// followed by a rank-3 Transpose "nfc" with perm {0,2,1} ({1,512,192} -> {1,192,512}; dataSize 12 = 3 x uint32).
// NOTE: auto-generated code — regenerate from the ONNX model rather than editing by hand.
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199142228811979f, .offset= -151}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
// "nfc" Transpose output tensor ({1,192,512}) and node registration.
.dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199142228811979f, .offset= -151}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names 
// Then the static per-channel scale tensor "gamma" ({1,1,512}, uint8, offset 0 — non-negative values only),
// and an ElementWiseBinary node (.uint32Value operation = 13; presumably multiply — confirm against QnnOpDef.h)
// computing gamma * pwconv2 output (the ConvNeXt layer-scale step, by the node naming).
outputs__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_2_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_2_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017323364736512f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_2_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_2_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_2_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_Mul_2[] = { "tts_ttl_vector_field_main_blocks_12_convnext_2_gamma", "_vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_Mul_2_output_0[] = {1, 
// Mul_2 output tensor ({1,192,512}, uint8) and node registration; gamma broadcasts over the {1,192,512} activation.
// NOTE: auto-generated code — regenerate from the ONNX model rather than editing by hand.
192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047844783402979f, .offset= -156}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_Add */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_Add[] = { "_vector_field_main_blocks_12_convnext_2_Mul_output_0", "_vector_field_main_blocks_12_convnext_2_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_Add_output_0[] = {1, 192, 512}; 
// ElementWiseBinary Add (.uint32Value operation = 0; presumably add — confirm against QnnOpDef.h): combines the
// block's Mul_output_0 (defined earlier in the file — likely the residual path, by naming) with Mul_2_output_0.
Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_2_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190723277628422f, .offset= -194}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_2_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_2_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_2_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_2_Mul_3[] = { "_vector_field_main_blocks_12_convnext_2_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_2_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t 
// Mul_3 masks the Add output with the graph input "latent_mask" ({1,1,192} per the converter command line,
// broadcasting over channels — TODO confirm); output keeps the Add tensor's quantization (same scale/offset).
// The next function (convnext_3 Mul) continues past this chunk.
outputs__vector_field_main_blocks_12_convnext_2_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_2_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190723277628422f, .offset= -194}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_2_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_2_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_2_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_2_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_2_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_Mul */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_Mul[] = { "_vector_field_main_blocks_12_convnext_2_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_Mul[] = { 
// [Generated code — see note at top of this graph section; do not hand-edit.]
// Continues the convnext_3 sub-block:
//  - finishes addNode_..._convnext_3_Mul (ElementWiseBinary "operation"=13,
//    re-applying "latent_mask" to the Mul_3 output -> {1,192,512}).
//  - addNode_..._dwconv_Pad: "Pad" node, rank-2 pad_amount {3,2} =
//    {{0,0},{16,16},{0,0}} (16 each side of the middle/sequence axis:
//    192 -> 224), scalar "scheme"=3; output {1,224,512} keeps the input's
//    quant params.
//  - addNode_..._Pad_output_0_ncf: "Transpose" with perm {0,2,1}
//    ({1,224,512} -> {1,512,224}, feature-last to feature-first).
//  - start of addNode_..._Conv_reshape_to_2d (continues in the next lines).
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190723277628422f, .offset= -194}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 16, 16, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad[] = { "_vector_field_main_blocks_12_convnext_3_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0[] = {1, 224, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190723277628422f, .offset= -194}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_dwconv_Pad,
// Node Params 2, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 512, 224}; Qnn_Tensor_t
outputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190723277628422f, .offset= -194}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 224}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d[] = {
// [Generated code — see note at top of this graph section; do not hand-edit.]
// Prepares the 1-D depthwise conv as a 2-D conv:
//  - finishes addNode_..._Conv_reshape_to_2d ("Reshape" of {1,512,224} to
//    rank-4 {1,512,1,224}).
//  - addNode_..._Conv_reshape_to_2d_nhwc: "Transpose" perm {0,2,3,1}
//    ({1,512,1,224} -> {1,1,224,512}, channel-last layout for the conv).
//  - addTensor_..._dwconv_weight: static uint8 filter {1,5,1,512}
//    (scale 0.0042296, offset -119) from BINVARSTART/BINLEN.
//  - addTensor_..._dwconv_bias: static uint8 bias {512}
//    (scale 0.0012690, offset -133).
//  - start of addNode_..._dwconv_Conv_2d (params continue in the next lines).
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190723277628422f, .offset= -194}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 224, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0190723277628422f, .offset= -194}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042295511811972f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias[]
= {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012690261937678f, .offset= -133}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_dilation[] = {1, 8}; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
// [Generated code — see note at top of this graph section; do not hand-edit.]
// The depthwise convolution itself:
//  - finishes addNode_..._dwconv_Conv_2d: "DepthWiseConv2d" with tensor params
//    dilation {1,8}, pad_amount {{0,0},{0,0}}, stride {1,1}; inputs are the
//    NHWC activation {1,1,224,512}, weight {1,5,1,512} and bias {512}.
//    Output {1,1,192,512} — consistent with an effective kernel extent of
//    1+(5-1)*8 = 33 on width: 224-33+1 = 192 (the earlier Pad supplied the
//    16+16 halo). Output quant: scale 0.0035380, offset -180.
//  - addNode_..._Conv_intermediate_nchw: "Transpose" perm {0,3,1,2}
//    ({1,1,192,512} -> {1,512,1,192}, back to channel-first).
//  - start of addNode_..._Conv_intermediate (Reshape; continues below).
"_vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight", "tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035380241461098f, .offset= -180}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d, // Node Params 3, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t
outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035380241461098f, .offset= -180}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t
// [Generated code — see note at top of this graph section; do not hand-edit.]
// Restores the 1-D layout and starts the following LayerNorm:
//  - finishes addNode_..._Conv_intermediate ("Reshape" {1,512,1,192} ->
//    {1,512,192}, dropping the dummy height axis).
//  - addNode_..._Conv_output_0_nfc: "Transpose" perm {0,2,1}
//    ({1,512,192} -> {1,192,512}, back to feature-last).
//  - addNode_..._convnext_3_Mul_1: ElementWiseBinary "operation"=13 applying
//    "latent_mask"; its output tensor is named ..._norm_Transpose_output_0
//    (it feeds the norm) and keeps the conv output's quant params.
//  - addTensor_..._norm_norm_weight / _bias: static uint8 LayerNorm scale
//    and shift vectors, each {512}, from BINVARSTART/BINLEN.
//  - start of addNode_..._norm_norm_LayerNormalization with tensor param
//    "axes" = {2} (normalize over the 512-feature axis; the node definition
//    continues past the end of this chunk).
outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035380241461098f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035380241461098f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc", // Node
Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_Mul_1[] = { "_vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035380241461098f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030947807244956f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023465561680496f, .offset= -162}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_12_convnext_3_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight", "tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0406849160790443f, .offset= -171}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name 
"LayerNorm", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf[] = { 
"_vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0406849160790443f, .offset= -171}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d[] = { 
"_vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0406849160790443f, .offset= -171}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 
2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0406849160790443f, .offset= -171}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0127649623900652f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 
0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026405027601868f, .offset= -228}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_stride[] = {2}; uint32_t 
_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight", "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0486221201717854f, .offset= -198}}}, .rank= 4, 
.dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0486221201717854f, .offset= -198}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // 
Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0486221201717854f, .offset= -198}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output 
Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_30(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_30 */ Qnn_Param_t params__elementwiseneuron_30[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_30[] = { "_vector_field_main_blocks_12_convnext_3_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_30[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0114724254235625f, .offset= -15}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_30", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_30, // Node Params 1, // Num Node Params inputs__elementwiseneuron_30, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_30, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_12_convnext_3_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0114724254235625f, .offset= -15}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t 
/* NOTE(review): auto-generated by qnn-onnx-converter (see header command line) — do not hand-edit values;
 * regenerate instead. Below: tail of the NCHW->NHWC Transpose (perm {0,2,3,1}) feeding the pwconv2 1x1 conv;
 * output is uint8 asymmetric-quantized (scale/offset come from min-max calibration). */
dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0114724254235625f, .offset= -15}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Static pwconv2 weight {1,1,1024,512} (HWIO for the 1x1 Conv2d below); bytes live in the
 * converter-emitted .bin blob referenced via BINVARSTART/BINLEN. uint8, scale/offset per min-max calib. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0149918664246798f, .offset= -112}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Static pwconv2 bias, 512 elements, quantized uint8 (bias_bitwidth=8 per converter flags). */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015327085275203f, .offset= -62}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* Conv2d node: 1x1 pointwise conv (stride 1x1, dilation 1x1, zero padding, group=1) — the ONNX
 * MatMul/Conv lowered to 2D; param tensors {dilation,pad_amount,stride} follow. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t
_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1};
/* Conv2d params: dilation {1,1} (dataSize 8 = 2 x uint32), pad_amount {{0,0},{0,0}} (16 bytes),
 * stride {1,1}; plus scalar group=1 and reuse_sparse_indices=false below. */
Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
/* Conv inputs: NHWC activation {1,1,192,1024}, weight {1,1,1024,512}, bias {512};
 * output {1,1,192,512} is consistent with a 1x1 conv mapping 1024 -> 512 channels. */
const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight", "tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0198976732790470f, .offset= -98}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose back: NHWC {1,1,192,512} -> NCHW {1,512,1,192} via perm {0,3,1,2};
 * quant encoding (scale 0.0198976732790470, offset -98) is carried through unchanged. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale=
0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0198976732790470f, .offset= -98}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape: drop the dummy H axis, {1,512,1,192} -> {1,512,192} (no params; same quant encoding). */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0198976732790470f, .offset= -98}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose NCF -> NFC: {1,512,192} -> {1,192,512} via perm {0,2,1} (12 bytes = 3 x uint32),
 * putting features last for the elementwise ops that follow. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0198976732790470f, .offset= -98}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Static per-channel scale tensor "gamma" {1,1,512} (name suggests the ConvNeXt layer-scale
 * parameter — TODO confirm against the ONNX graph); uint8 with offset 0. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_12_convnext_3_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_12_convnext_3_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026888949796557f, .offset= 0}}}, .rank= 3,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_12_convnext_3_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_12_convnext_3_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_12_convnext_3_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* ElementWiseBinary op codes used in this file: operation=13 -> multiply, operation=0 -> add
 * (scalar "operation" selects the binary op). Mul_2: gamma {1,1,512} * conv output {1,192,512},
 * broadcasting over the 192 axis. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_Mul_2[] = { "tts_ttl_vector_field_main_blocks_12_convnext_3_gamma", "_vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0080535095185041f, .offset= -99}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, //
Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_Mul_2, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Add (operation=0): residual-style sum of Mul_output_0 (produced earlier in the file) and
 * Mul_2_output_0, both {1,192,512}. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_Add */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_Add[] = { "_vector_field_main_blocks_12_convnext_3_Mul_output_0", "_vector_field_main_blocks_12_convnext_3_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0215212143957615f, .offset= -176}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Mul_3 (operation=13): masks the Add result with the graph input "latent_mask" {1,1,192}
 * (broadcast against {1,192,512}); output keeps the Add's quant encoding. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_Mul_3[] = { "_vector_field_main_blocks_12_convnext_3_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_12_convnext_3_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0215212143957615f, .offset= -176}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_Mul_3", // Node
Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_Mul_3, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose NFC -> NCF: {1,192,512} -> {1,512,192} via perm {0,2,1}. */
static ModelError_t addNode__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf[] = { "_vector_field_main_blocks_12_convnext_3_Mul_3_output_0" }; uint32_t dimensions__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t
outputs__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0215212143957615f, .offset= -176}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* blocks_13 Add (operation=0): sums the masked block-12 output {1,512,192} with
 * _vector_field_main_blocks_13_Transpose_output_0 (defined elsewhere in the file). */
static ModelError_t addNode__vector_field_main_blocks_13_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_13_Add */ Qnn_Param_t params__vector_field_main_blocks_13_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_13_Add[] = { "_vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf", "_vector_field_main_blocks_13_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_13_Add_output_0[] = {1, 512, 192}; Qnn_Tensor_t
outputs__vector_field_main_blocks_13_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_13_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_13_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_13_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_13_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_13_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_13_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose NCF -> NFC: {1,512,192} -> {1,192,512} via perm {0,2,1}, ahead of the mask multiply. */
static ModelError_t addNode__vector_field_main_blocks_13_Add_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_13_Add_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_13_Add_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_13_Add_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_13_Add_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_13_Add_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_13_Add_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_13_Add_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_13_Add_output_0_nfc[] = { "_vector_field_main_blocks_13_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_13_Add_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_13_Add_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_13_Add_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_13_Add_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_13_Add_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_13_Add_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_13_Add_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_13_Add_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t
/* blocks_13 Mul (operation=13): applies graph input "latent_mask" {1,1,192} to the block-13
 * output {1,192,512}; quant encoding is preserved from the Add. */
addNode__vector_field_main_blocks_13_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_13_Mul */ Qnn_Param_t params__vector_field_main_blocks_13_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_13_Mul[] = { "_vector_field_main_blocks_13_Add_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_13_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_13_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_13_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_13_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_13_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_13_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_13_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_13_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* blocks_14 entry Mul (operation=13): re-applies latent_mask to the block-13 result before the
 * depthwise-conv padding below; same quant encoding. */
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_Mul */ Qnn_Param_t
params__vector_field_main_blocks_14_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_Mul[] = { "_vector_field_main_blocks_13_Mul_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* dwconv Pad: pads axis 1 by 2+2 ({1,192,512} -> {1,196,512}), pad_amount {{0,0},{2,2},{0,0}}
 * (24 bytes = 6 x uint32); scheme scalar = 3. Function continues beyond this chunk. */
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Pad */ uint32_t
dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_14_convnext_0_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_14_convnext_0_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
// Reshape [1,512,196] -> [1,512,1,196]: inserts a unit height axis so the 1D
// depthwise conv can be lowered to DepthWiseConv2d.
addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose NCHW -> NHWC ([1,512,1,196] -> [1,1,196,512], perm {0,2,3,1})
// to match the layout DepthWiseConv2d consumes.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
.dataSize=16}}, // 4 uint32 permutation entries
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0226508788764477f, .offset= -174}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static depthwise-conv weight tensor [1,5,1,512] (1x5 kernel,
// 512 channels), quantized uint8; bytes come from the generated binary blob
// via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0033035164233297f, .offset= -126}}},
.rank= 4,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Registers the static depthwise-conv bias tensor [512], quantized uint8.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0016747618792579f, .offset= -171}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// DepthWiseConv2d over NHWC [1,1,196,512] with the 1x5 per-channel kernel:
// stride 1x1, dilation 1x1, no conv-side padding (padding was done by the
// explicit Pad node), giving width 196-5+1 = 192 -> output [1,1,192,512].
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_dilation",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_dilation,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_dilation,
.dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_pad_amount,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="stride",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_stride",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_stride,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d_stride,
.dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
// Inputs: activation, static weight, static bias (registered above).
const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d[] = {
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight",
"tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0038446942344308f, .offset= -106}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose NHWC -> NCHW ([1,1,192,512] -> [1,512,1,192], perm {0,3,1,2}),
// undoing the layout change done before the conv.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw_perm,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0038446942344308f, .offset= -106}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape [1,512,1,192] -> [1,512,192]: drops the unit height axis inserted
// for the 2D lowering, restoring the rank-3 sequence tensor.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate */
const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate[] = {
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0038446942344308f, .offset= -106}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

static ModelError_t
// Transpose NCF -> NFC ([1,512,192] -> [1,192,512], perm {0,2,1}): returns
// the conv result to the feature-last layout used by the rest of the block.
addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc_perm,
.dataSize=12}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc[] = {
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0038446942344308f, .offset= -106}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Masks the conv output with "latent_mask" (ElementWiseBinary operation=13,
// presumably MULTIPLY — confirm in QnnOpDef.h). Note: the output tensor is
// named "..._norm_Transpose_output_0" because the converter folded the
// original ONNX norm-side Transpose into this node's output.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_Mul_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_Mul_1[] = {
"_vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_Mul_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_norm_Transpose_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0038446942344308f, .offset= -106}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static LayerNorm gamma (scale) tensor [512], quantized uint8.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0056689991615713f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Registers the static LayerNorm beta (bias) tensor [512], quantized uint8.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0027371477335691f, .offset= -152}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// LayerNorm over the last (feature) axis (axes={2}) of [1,192,512], with
// epsilon 1e-6 and the gamma/beta tensors registered above.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="axes",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization_axes",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization_axes,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization_axes,
.dataSize=4}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="epsilon",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization[] = {
"_vector_field_main_blocks_14_convnext_0_norm_Transpose_output_0",
"tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight",
"tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= {
QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0410469211637974f, .offset= -73}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr,
.dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose NFC -> NCF after the LayerNorm (perm {0,2,1}); definition
// continues past this chunk.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0410469211637974f, .offset= -73}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params 
inputs__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0410469211637974f, .offset= -73}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input 
Tensor Names outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0410469211637974f, .offset= -73}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0173901207745075f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026282796170563f, .offset= -233}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* 
ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= 
{.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0345621034502983f, .offset= -198}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0345621034502983f, .offset= -198}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, 
// Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0345621034502983f, .offset= -198}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_32(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_32 */ Qnn_Param_t params__elementwiseneuron_32[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_32[] = { "_vector_field_main_blocks_14_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_32[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0081314332783222f, .offset= -21}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_32", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_32, // Node Params 1, // Num 
Node Params inputs__elementwiseneuron_32, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_32, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_14_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0081314332783222f, .offset= -21}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d, // 
Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
// (continuation of the addNode helper begun above) Output tensor descriptor for the
// Transpose that lays the activation out ahead of the pwconv2 1x1 convolution.
// Quantized uint8 with converter-recorded scale/offset; buffer is graph-allocated (nullptr).
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0081314332783222f, .offset= -21}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// Register the Transpose node producing the tensor above.
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static pwconv2 weight tensor, shape {1, 1, 1024, 512}: a 1x1 kernel
// mapping 1024 -> 512 channels. Payload is read from the embedded binary blob via
// BINVARSTART/BINLEN; values are asymmetric-quantized uint8 (see command line in header).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0225059669464827f, .offset= -135}}},
.rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight)}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Registers the static pwconv2 bias tensor, shape {512} (one value per output channel),
// quantized uint8, payload from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019626442808658f, .offset= -66}}},
.rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias)}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Adds the Conv2d node realizing the ONNX pwconv2 (pointwise) Conv as a 2-D convolution:
// stride {1,1}, dilation {1,1}, zero padding, group=1. Inputs are the NHWC-transposed
// activation plus the static weight/bias tensors registered above; output is
// {1, 1, 192, 512} quantized uint8.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d[] = {
// dilation = {1,1}: rank-1 uint32 tensor param, 2 elements (dataSize 8 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_dilation",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_dilation,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// pad_amount = {{0,0},{0,0}}: rank-2 (2x2) uint32 tensor param, 4 elements (16 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// stride = {1,1}: rank-1 uint32 tensor param, 2 elements (8 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_stride",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_stride,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d[] = {
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight",
"tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0264784414321184f, .offset= -71}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Transpose with perm {0,3,1,2} that moves the conv output from channel-last
// {1,1,192,512} back to channel-first {1,512,1,192}. Quantization params are carried
// through unchanged (same scale/offset as the conv output).
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
// perm tensor param: 4 x uint32 (dataSize 16 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw_perm",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0264784414321184f, .offset= -71}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Reshape collapsing the singleton H dim: {1,512,1,192} -> {1,512,192},
// restoring the original ONNX Conv output layout and name.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate */
const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate[] = {
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0264784414321184f, .offset= -71}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Transpose with perm {0,2,1}: {1,512,192} -> {1,192,512}, i.e. feature-last
// ("nfc") layout used by the following element-wise ops.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc[] = {
// perm tensor param: 3 x uint32 (dataSize 12 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc_perm",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc[] = {
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0264784414321184f, .offset= -71}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static ConvNeXt layer-scale "gamma" tensor, shape {1,1,512},
// quantized uint8 (offset 0), payload from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_14_convnext_0_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_14_convnext_0_gamma",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038028676062822f, .offset= 0}}},
.rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_14_convnext_0_gamma,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_14_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_14_convnext_0_gamma)}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Adds ElementWiseBinary node: gamma * conv output (broadcast {1,1,512} over {1,192,512}).
// operation=13 is the converter-emitted opcode for this ONNX Mul; exact enum mapping lives
// in QnnOpDef.h — NOTE(review): confirm 13 == multiply in this SDK version.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_Mul_2 */
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_Mul_2[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_Mul_2[] = {
"tts_ttl_vector_field_main_blocks_14_convnext_0_gamma",
"_vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_Mul_2_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_Mul_2[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_Mul_2_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0154858967289329f, .offset= -62}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_Mul_2_output_0,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the ConvNeXt residual add: Mul_output_0 + Mul_2_output_0 (operation=0, the
// converter's opcode for ONNX Add — confirm against QnnOpDef.h).
// "_vector_field_main_blocks_14_convnext_0_Mul_output_0" is produced earlier in this file.
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_Add */
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_Add[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_Add[] = {
"_vector_field_main_blocks_14_convnext_0_Mul_output_0",
"_vector_field_main_blocks_14_convnext_0_Mul_2_output_0"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_Add_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_Add[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_Add_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0277508422732353f, .offset= -147}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_Add_output_0,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the masking multiply: Add_output_0 * latent_mask (graph input {1,1,192},
// broadcast over {1,192,512} per QNN broadcasting — see op definition).
static ModelError_t addNode__vector_field_main_blocks_14_convnext_0_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_14_convnext_0_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_14_convnext_0_Mul_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_14_convnext_0_Mul_3[] = {
"_vector_field_main_blocks_14_convnext_0_Add_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_14_convnext_0_Mul_3_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_14_convnext_0_Mul_3[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_14_convnext_0_Mul_3_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0265735648572445f, .offset= -154}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_14_convnext_0_Mul_3_output_0,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_14_convnext_0_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_14_convnext_0_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_14_convnext_0_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_14_convnext_0_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds block 15's entry multiply: Mul_3_output_0 * latent_mask again.
// NOTE(review): the mask was already applied by Mul_3 directly above, so this second
// multiply looks redundant (mask is presumably 0/1) — it mirrors the source ONNX graph,
// which is authoritative; do not "fix" it here.
static ModelError_t addNode__vector_field_main_blocks_15_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_Mul */
Qnn_Param_t params__vector_field_main_blocks_15_Mul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_15_Mul[] = {
"_vector_field_main_blocks_14_convnext_0_Mul_3_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_15_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_Mul[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_Transpose_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0265735648572445f, .offset= -154}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_Transpose_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_15_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_15_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Reshape flattening the batch dim, {1,192,512} -> {192,512}, so the query
// projection can run as a rank-2 FullyConnected.
static ModelError_t addNode__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape */
const char* inputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape[] = {
"_vector_field_main_blocks_15_Transpose_output_0"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape[] = {192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0265735648572445f, .offset= -154}}},
.rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static FullyConnected weight for the attention query projection,
// shape {256, 512} (out_features x in_features), quantized uint8, payload from
// the binary blob. Name preserved from the source ONNX initializer.
static ModelError_t addTensor_onnx__MatMul_3191(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_onnx__MatMul_3191[] = {256, 512};
VALIDATE(model.addTensor("onnx__MatMul_3191", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3191",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0116140060126781f, .offset= -139}}},
.rank= 2, .dimensions=dimensions_onnx__MatMul_3191,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(onnx__MatMul_3191), .dataSize=BINLEN(onnx__MatMul_3191)}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Registers the static bias for the query projection, shape {256}, quantized uint8,
// payload from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0056930594146252f, .offset= -115}}},
.rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias)}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Adds the query projection as a FullyConnected node (ONNX MatMul + Add folded):
// {192,512} x {256,512}^T + bias -> {192,256}, quantized uint8.
static ModelError_t addNode__vector_field_main_blocks_15_attn_W_query_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_query_linear_MatMul */
const char* inputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul[] = {
"_vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape",
"onnx__MatMul_3191",
"tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_W_query_linear_Add_output_0_fc[] = {192, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_query_linear_Add_output_0_fc",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}},
.rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_query_linear_Add_output_0_fc,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_W_query_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Reshape restoring the batch dim: {192,256} -> {1,192,256}.
static ModelError_t addNode__vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape */
const char* inputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape[] = {
"_vector_field_main_blocks_15_attn_W_query_linear_Add_output_0_fc"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_W_query_linear_Add_output_0[] = {1, 192, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_W_query_linear_Add_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_W_query_linear_Add_output_0,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Split along axis 2 at indices {64,128,192}, cutting the {1,192,256} query
// projection into four {1,192,64} chunks (outputs 0-3); all four inherit the input's
// quantization params.
static ModelError_t addNode__vector_field_main_blocks_15_attn_Split(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_Split */
uint32_t dimensions__vector_field_main_blocks_15_attn_Split_split_index[] = {3};
uint32_t _vector_field_main_blocks_15_attn_Split_split_index[] = {64, 128, 192};
Qnn_Param_t params__vector_field_main_blocks_15_attn_Split[] = {
// split_index tensor param: 3 x uint32 (dataSize 12 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_split_index",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_split_index,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Split_split_index, .dataSize=12}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
};
const char* inputs__vector_field_main_blocks_15_attn_Split[] = {
"_vector_field_main_blocks_15_attn_W_query_linear_Add_output_0"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_Split_output_0[] = {1, 192, 64};
uint32_t dimensions__vector_field_main_blocks_15_attn_Split_output_1[] = {1, 192, 64};
uint32_t dimensions__vector_field_main_blocks_15_attn_Split_output_2[] = {1, 192, 64};
uint32_t dimensions__vector_field_main_blocks_15_attn_Split_output_3[] = {1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Split[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_output_0,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_output_1",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_output_1,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_output_2",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_output_2,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_output_3",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}},
.rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_output_3,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_Split", // Node Name
"qti.aisw", // Package Name
"Split", // Qnn Node Type
params__vector_field_main_blocks_15_attn_Split, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_15_attn_Split, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_Split, // Output Tensors
4// Num Output Tensors
), err);
return err;
}

// Adds the ONNX Unsqueeze of Split_output_0 as a Reshape: {1,192,64} -> {1,1,192,64}.
static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze */
const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze[] = {
"_vector_field_main_blocks_15_attn_Split_output_0"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}},
.rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_output_0,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_attn_Unsqueeze", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_15_attn_Unsqueeze, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_attn_Unsqueeze, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Same pattern for Split_output_1 (definition continues on the next line of the file).
static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_1 */
const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_1[] = {
"_vector_field_main_blocks_15_attn_Split_output_1"
};
uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_1_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_1[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_1", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_2 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_2[] = { "_vector_field_main_blocks_15_attn_Split_output_2" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_2_output_0[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_2", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Unsqueeze_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Unsqueeze_3 */ const char* inputs__vector_field_main_blocks_15_attn_Unsqueeze_3[] = { "_vector_field_main_blocks_15_attn_Split_output_3" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Unsqueeze_3_output_0[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Unsqueeze_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Unsqueeze_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Unsqueeze_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= 
{ QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Unsqueeze_3", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Unsqueeze_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Unsqueeze_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Concat(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Concat */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Concat[] = { "_vector_field_main_blocks_15_attn_Unsqueeze_output_0", "_vector_field_main_blocks_15_attn_Unsqueeze_1_output_0", "_vector_field_main_blocks_15_attn_Unsqueeze_2_output_0", "_vector_field_main_blocks_15_attn_Unsqueeze_3_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Concat_output_0[] = {4, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_15_attn_Concat, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Concat, // Input Tensor Names 4, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Concat, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Slice_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Slice_1 */ uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_1_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_15_attn_Slice_1_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 0, 32, 1}; Qnn_Param_t params__vector_field_main_blocks_15_attn_Slice_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_1_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_1_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Slice_1_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 
0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Slice_1[] = { "_vector_field_main_blocks_15_attn_Concat_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_1_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Slice_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Slice_1", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_15_attn_Slice_1, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_15_attn_Slice_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Slice_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Slice_2(QnnModel& model){ ModelError_t 
err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Slice_2 */ uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_2_ranges[] = {4, 3}; int32_t _vector_field_main_blocks_15_attn_Slice_2_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 32, 64, 1}; Qnn_Param_t params__vector_field_main_blocks_15_attn_Slice_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_2_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_2_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Slice_2_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Slice_2[] = { "_vector_field_main_blocks_15_attn_Concat_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Slice_2_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Slice_2[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Slice_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0665431767702103f, .offset= -135}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Slice_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Slice_2", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__vector_field_main_blocks_15_attn_Slice_2, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_15_attn_Slice_2, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Slice_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Mul_3[] = { "_vector_field_main_blocks_15_attn_Slice_1_output_0", "_vector_field_main_blocks_3_attn_Cos_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_3_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_15_attn_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0456611998379230f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_4 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Mul_4[] = { "_vector_field_main_blocks_15_attn_Slice_2_output_0", "_vector_field_main_blocks_3_attn_Sin_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_4_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Mul_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0527306571602821f, .offset= -119}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Mul_4", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Mul_4, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Mul_4, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Mul_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_5 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_5[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Mul_5[] = { "_vector_field_main_blocks_15_attn_Slice_1_output_0", "_vector_field_main_blocks_3_attn_Sin_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_5_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Mul_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0478659309446812f, .offset= -117}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Mul_5", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Mul_5, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Mul_5, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Mul_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_6(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_6 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_6[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Mul_6[] = { "_vector_field_main_blocks_15_attn_Slice_2_output_0", "_vector_field_main_blocks_3_attn_Cos_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Mul_6_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Mul_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Mul_6_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0639081820845604f, .offset= -140}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Mul_6_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Mul_6", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Mul_6, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Mul_6, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Mul_6, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Sub(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Sub */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Sub[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Sub[] = { "_vector_field_main_blocks_15_attn_Mul_3_output_0", "_vector_field_main_blocks_15_attn_Mul_4_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Sub_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Sub[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Sub_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0672554671764374f, .offset= -132}}}, .rank= 4, 
.dimensions=dimensions__vector_field_main_blocks_15_attn_Sub_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Sub", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Sub, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Sub, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Sub, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Add_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Add_1 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Add_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Add_1[] = { "_vector_field_main_blocks_15_attn_Mul_5_output_0", "_vector_field_main_blocks_15_attn_Mul_6_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Add_1_output_0[] = {4, 1, 192, 32}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Add_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Add_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0683003962039948f, .offset= -131}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Add_1_output_0, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Add_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Add_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Add_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Add_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Concat_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Concat_3 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Concat_3[] = { "_vector_field_main_blocks_15_attn_Sub_output_0", "_vector_field_main_blocks_15_attn_Add_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Concat_3_output_0[] = {4, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Concat_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0683003962039948f, .offset= -131}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Concat_3", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_15_attn_Concat_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Concat_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Concat_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_MatMul */ Qnn_Param_t params__vector_field_main_blocks_15_attn_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_MatMul[] = { "_vector_field_main_blocks_15_attn_Concat_3_output_0", "_vector_field_main_blocks_15_attn_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_MatMul_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 4.6630754470825195f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_MatMul_output_0, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_15_attn_MatMul, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_15_attn_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Div_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Div_4 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Div_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Div_4[] = { "_vector_field_main_blocks_15_attn_MatMul_output_0", "_vector_field_main_blocks_3_attn_Constant_39_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Div_4_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Div_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Div_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2914422154426575f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Div_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Div_4", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Div_4, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Div_4, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Div_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Where */ const char* inputs__vector_field_main_blocks_15_attn_Where[] = { "_vector_field_main_blocks_21_attn_Cast_2_output_0", "_vector_field_main_blocks_3_attn_Constant_42_output_0", "_vector_field_main_blocks_15_attn_Div_4_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Where_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 1334440575053054352202761503860850688.0000000000000000f, .offset= -255}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Softmax */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Softmax[] = { "_vector_field_main_blocks_15_attn_Where_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Softmax_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__vector_field_main_blocks_15_attn_Softmax, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_15_attn_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Where_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Where_1 */ const char* inputs__vector_field_main_blocks_15_attn_Where_1[] = { "_vector_field_main_blocks_23_attention_Cast_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0", "_vector_field_main_blocks_15_attn_Softmax_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Where_1_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Where_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Where_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Where_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Where_1", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node 
Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Where_1, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Where_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_MatMul_1 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_MatMul_1[] = { "_vector_field_main_blocks_15_attn_Where_1_output_0", "_vector_field_main_blocks_15_attn_Concat_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_MatMul_1_output_0[] = {4, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1076362803578377f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // 
Qnn Node Type params__vector_field_main_blocks_15_attn_MatMul_1, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_15_attn_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Split_3 */ uint32_t dimensions__vector_field_main_blocks_15_attn_Split_3_split_index[] = {3}; uint32_t _vector_field_main_blocks_15_attn_Split_3_split_index[] = {1, 2, 3}; Qnn_Param_t params__vector_field_main_blocks_15_attn_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_attn_Split_3_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Split_3[] = { "_vector_field_main_blocks_15_attn_MatMul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_3_output_0[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_3_output_1[] = {1, 1, 
192, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_3_output_2[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_15_attn_Split_3_output_3[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1076362803578377f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1076362803578377f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_3_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1076362803578377f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_3_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Split_3_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1076362803578377f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Split_3_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Split_3", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_15_attn_Split_3, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_15_attn_Split_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Split_3, // Output Tensors 4// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Concat_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Concat_5 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Concat_5[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, 
{.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Concat_5[] = { "_vector_field_main_blocks_15_attn_Split_3_output_0", "_vector_field_main_blocks_15_attn_Split_3_output_1", "_vector_field_main_blocks_15_attn_Split_3_output_2", "_vector_field_main_blocks_15_attn_Split_3_output_3" }; uint32_t dimensions__vector_field_main_blocks_15_attn_Concat_5_output_0[] = {1, 1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Concat_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_Concat_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1076362803578377f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_15_attn_Concat_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Concat_5", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_15_attn_Concat_5, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Concat_5, // Input Tensor Names 4, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Concat_5, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Squeeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Squeeze */ const char* inputs__vector_field_main_blocks_15_attn_Squeeze[] = { "_vector_field_main_blocks_15_attn_Concat_5_output_0" }; 
uint32_t dimensions__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_pre_reshape[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1076362803578377f, .offset= -128}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3200(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3200[] = {512, 256}; VALIDATE(model.addTensor("onnx__MatMul_3200", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3200", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0094631463289261f, .offset= -141}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3200, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3200), .dataSize=BINLEN(onnx__MatMul_3200)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009942576289177f, .offset= -105}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_out_fc_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_out_fc_linear_MatMul */ const char* inputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul[] = { "_vector_field_main_blocks_15_attn_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3200", "tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias" }; uint32_t 
dimensions__vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0_fc[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3156403303146362f, .offset= -143}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_out_fc_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3156403303146362f, .offset= -143}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_attn_Mul_14(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_attn_Mul_14 */ Qnn_Param_t params__vector_field_main_blocks_15_attn_Mul_14[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_15_attn_Mul_14[] = { "_vector_field_main_blocks_15_attn_out_fc_linear_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_15_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t 
outputs__vector_field_main_blocks_15_attn_Mul_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3156403303146362f, .offset= -143}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_attn_Mul_14", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_attn_Mul_14, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_attn_Mul_14, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_attn_Mul_14, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_Add */ Qnn_Param_t params__vector_field_main_blocks_15_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_15_Add[] = { "_vector_field_main_blocks_15_Transpose_1_output_0", "_vector_field_main_blocks_15_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_15_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_15_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { 
.id=0, .name= "_vector_field_main_blocks_15_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.3184200227260590f, .offset= -144}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_15_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_15_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_15_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_15_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_15_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_15_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_15_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_15_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009872997179627f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_15_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_15_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_15_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_15_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_15_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_15_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_15_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009701388771646f, .offset= -111}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_15_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_15_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_15_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_15_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_15_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_15_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_15_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_15_norm_norm_LayerNormalization[] = { 
// ---- Tail of addNode for "_vector_field_main_blocks_15_norm_norm_LayerNormalization" (function opens on an earlier line). ----
// Param "axes": static uint32 tensor, dataSize=4 => a single reduction axis (data array declared above this chunk).
{.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_15_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_15_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// Param "epsilon": scalar float 1e-6 (LayerNorm stabilizer).
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
// Inputs: transposed activation plus the static weight/bias tensors registered elsewhere in this file.
const char* inputs__vector_field_main_blocks_15_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_15_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_15_norm_norm_weight", "tts_ttl_vector_field_main_blocks_15_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_15_norm_Transpose_1_output_0[] = {1, 192, 512};
// Output: (1,192,512) asymmetric uint8 activation; scale/offset come from calibration (min-max, tf quantizer per header).
Qnn_Tensor_t outputs__vector_field_main_blocks_15_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0167574323713779f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_15_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_15_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Block-15 mask multiply: ElementWiseBinary (operation code 13; node was converted from an ONNX Mul,
// cf. the node name) applying the "latent_mask" graph input to the LayerNorm output. The output tensor
// deliberately reuses the producer's scale/offset (0.01676.../-196), so masking does not requantize.
static ModelError_t addNode__vector_field_main_blocks_15_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_15_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_15_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_15_Mul_1[] = { "_vector_field_main_blocks_15_norm_Transpose_1_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_15_Mul_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_15_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_15_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0167574323713779f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_15_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_15_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_15_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_15_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_15_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Block-16 entry mask multiply (same ElementWiseBinary/op-13 pattern, same quant params passthrough)
// applied before the block's depthwise convolution. VALIDATE/addNode for this node is on the next line.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_Mul */
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_Mul[] = { "_vector_field_main_blocks_15_Mul_1_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0167574323713779f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// addNode for "_vector_field_main_blocks_16_convnext_0_Mul" (arrays declared on the previous line).
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Pre-conv padding: pads only the middle (sequence) axis by 2 on each side, 192 -> 196, which matches
// the 5-tap depthwise kernel below (196 - 5 + 1 = 192, i.e. "same" length overall). Pad amounts are a
// static (3,2) uint32 tensor {0,0, 2,2, 0,0}; "scheme"=3 selects the pad mode (enum in QnnOpDef —
// NOTE(review): looks like the non-constant scheme of the source ONNX Pad; confirm against QnnOpDef.h).
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Pad */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_16_convnext_0_Mul_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512};
// Padded output keeps the producer's quant params (0.01676.../-196).
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0167574323713779f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Layout shuffle: Transpose with perm {0,2,1}, (1,196,512) -> (1,512,196), i.e. features-last to
// channels-first ("ncf") ahead of the 1-D-conv-as-2-D-conv lowering that follows.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0167574323713779f, .offset= -196}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
// Closes the outputs array started on the previous line, then addNode for the "ncf" Transpose.
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// 1-D -> 2-D lowering, step 1: parameterless Reshape inserting a unit height axis,
// (1,512,196) -> (1,512,1,196), so the 1-D depthwise conv can run as DepthWiseConv2d.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0167574323713779f, .offset= -196}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// 1-D -> 2-D lowering, step 2: Transpose perm {0,2,3,1}, (1,512,1,196) -> (1,1,196,512),
// putting the tensor into the NHWC order the conv op consumes.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0167574323713779f, .offset= -196}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Static depthwise-conv weight: (1,5,1,512) uint8, i.e. a 5-tap kernel per each of the 512 channels;
// payload lives in the companion .bin via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight[] = {1, 5, 1, 512};
// addTensor for the depthwise-conv weight declared on the previous line (asymmetric uint8, scale/offset below).
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0056649777106941f, .offset= -146}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Static depthwise-conv bias: (512,) uint8 (8-bit bias per the converter's bias_bitwidth=8 setting),
// payload also in the companion .bin.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008040643879212f, .offset= -94}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// The DepthWiseConv2d node: 1x5 kernel (weight above), stride {1,1}, dilation {1,1}, zero pad_amount
// (padding was applied explicitly by the Pad node), so width 196 -> 192. Arrays continue on next lines.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight",
"tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020962513517588f, .offset= -184}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; 
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020962513517588f, .offset= -184}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020962513517588f, .offset= -184}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
// Closes the Reshape outputs array started on the previous line, then its addNode.
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Undo the lowering, step 3: Transpose perm {0,2,1}, (1,512,192) -> (1,192,512), back to features-last.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020962513517588f, .offset= -184}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Post-conv mask multiply (ElementWiseBinary op 13 with "latent_mask"). Note the output tensor is
// named "..._norm_Transpose_output_0": the converter collapsed the following ONNX Transpose into this
// node's output, which feeds the block-16 LayerNorm below.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_Mul_1[] = { "_vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020962513517588f, .offset= -184}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Static LayerNorm gamma for block 16: (512,) uint8, symmetric-looking encoding (offset 0); data in .bin.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028830682858825f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021414232905954f, .offset= -177}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias), 
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_16_convnext_0_norm_Transpose_output_0", 
/* (cont.) remaining LayerNorm inputs: gamma and beta static tensors registered above. */
"tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight",
"tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0395847782492638f, .offset= -173}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds a Transpose node with perm {0,2,1}: {1,192,512} -> {1,512,192}, i.e. back to
/// channel-first ("ncf") layout after the LayerNorm. Quantization params are carried through.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf[] = {
// "perm" tensor param: three uint32 elements (dataSize = 12 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf[] = {
"_vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0395847782492638f, .offset= -173}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds a param-less Reshape: {1,512,192} -> {1,512,1,192}, inserting a unit height axis
/// so the 1x1 pointwise conv (pwconv1) can be expressed as a Conv2d.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0395847782492638f, .offset= -173}}}, .rank= 4,
/* (cont.) output tensor of the pwconv1 reshape-to-2d node, then the addNode call. */
.dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds a Transpose with perm {0,2,3,1}: {1,512,1,192} -> {1,1,192,512},
/// i.e. channel-first -> channel-last (NHWC) ahead of the Conv2d.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
// "perm" tensor param: four uint32 elements (dataSize = 16 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0395847782492638f, .offset= -173}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Registers the pwconv1 weights: static u8 tensor {1,1,512,1024} — a 1x1 kernel
/// taking 512 input channels to 1024 output channels (presumably HWIO layout — confirm
/// against the QNN Conv2d weight-layout spec).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0090254312381148f, .offset= -124}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/// Registers the pwconv1 bias: static u8 tensor, 1024 elements, from the BIN blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams=
/* (cont.) quantization encoding and BIN payload of the pwconv1 bias tensor. */
{ QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020263392943889f, .offset= -210}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/// Adds the pwconv1 Conv2d node: 1x1 conv, stride {1,1}, dilation {1,1}, zero padding,
/// group 1 — expands 512 -> 1024 channels on the {1,1,192,512} NHWC activation.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d[] = {
// "dilation" tensor param: {1,1}, two uint32 (dataSize = 8).
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// "pad_amount" tensor param: 2x2 zeros (dataSize = 16).
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// "stride" tensor param: {1,1}, two uint32 (dataSize = 8).
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="stride",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions=
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="group",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="reuse_sparse_indices",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d[] = {
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight",
"tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350021310150623f, .offset= -191}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds a Transpose with perm {0,3,1,2}: {1,1,192,1024} -> {1,1024,1,192},
/// NHWC back to channel-first after the conv.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
// "perm" tensor param: four uint32 elements (dataSize = 16 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t
/* (cont.) output tensor of the NCHW transpose, then its addNode call. */
outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350021310150623f, .offset= -191}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds a param-less Reshape: {1,1024,1,192} -> {1,1024,192}, dropping the unit
/// height axis that was inserted for the Conv2d.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate[] = {
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t
outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0350021310150623f, .offset= -191}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds an ElementWiseNeuron activation with scalar param "operation" = 1 — the
/// converter's fused activation for the block's GELU-like act (name derives from
/// the ONNX act/Mul_1 node); confirm the enum value in QnnOpDef.h.
static ModelError_t addNode__elementwiseneuron_34(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_34 */
Qnn_Param_t params__elementwiseneuron_34[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_34[] = {
"_vector_field_main_blocks_16_convnext_0_pwconv1_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_34[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_16_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0093642333522439f, .offset= -18}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_34", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_34, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_34, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_34, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds a param-less Reshape: {1,1024,192} -> {1,1024,1,192}, preparing the
/// activation for the second pointwise conv (pwconv2) as a Conv2d.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_16_convnext_0_act_Mul_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
/* (cont.) quantization encoding of the pwconv2 reshape output (carried through from the activation). */
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0093642333522439f, .offset= -18}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Adds a Transpose with perm {0,2,3,1}: {1,1024,1,192} -> {1,1,192,1024},
/// channel-first -> NHWC ahead of the pwconv2 Conv2d.
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
// "perm" tensor param: four uint32 elements (dataSize = 16 bytes).
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0093642333522439f, .offset= -18}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/// Registers the pwconv2 weights: static u8 tensor {1,1,1024,512} — a 1x1 kernel
/// projecting 1024 channels back down to 512 (presumably HWIO layout — confirm
/// against the QNN Conv2d weight-layout spec).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164859723299742f, .offset= -135}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/// Registers the pwconv2 bias: static u8 tensor, 512 elements, from the BIN blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016987802227959f, .offset= -100}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
/* 1x1 "Conv2d" node implementing the ONNX pwconv2 MatMul/Conv: dilation {1,1}, pad {0,0,0,0},
   stride {1,1}, group 1. Inputs: NHWC activation {1,1,192,1024}, weight {1,1,1024,512}, bias {512}.
   Output "_..._Conv_intermediate" is uint8 {1,1,192,512} (scale 0.0222648..., offset -78). */
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
/* Inputs: activation, static weight, static bias (registered above). */
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0222648158669472f, .offset= -78}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose back from conv layout: perm {0,3,1,2} takes {1,1,192,512} -> {1,512,1,192}
   (initializer continues on the next file line). */
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t
/* Output of the NCHW transpose declared on the previous file line: uint8 {1,512,1,192},
   same quantization as its input (scale 0.0222648..., offset -78). */
outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0222648158669472f, .offset= -78}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Parameter-free "Reshape" dropping the singleton spatial dim: {1,512,1,192} -> {1,512,192},
   producing "_..._Conv_output_0". */
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate */
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0222648158669472f, .offset= -78}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose perm {0,2,1}: {1,512,192} -> {1,192,512} ("feature-last" layout) producing
   "_..._Conv_output_0_nfc". */
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0222648158669472f, .offset= -78}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static per-channel scaling vector (ConvNeXt "gamma" / layer-scale weights): uint8 {1,1,512},
   scale 0.0017605..., offset 0, payload from the binary blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_16_convnext_0_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_16_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017605200409889f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_16_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_16_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_16_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
/* ElementWiseBinary with scalar "operation" = 13 (elementwise multiply of gamma with the conv
   output; initializer continues on the next file line). */
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_Mul_2 */
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32,
{.uint32Value = 13}}}} };
/* Mul_2 inputs: static gamma {1,1,512} broadcast against the conv output {1,192,512}. */
const char* inputs__vector_field_main_blocks_16_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_16_convnext_0_gamma", "_vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_Mul_2_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0088114729151130f, .offset= -55}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Residual add (ElementWiseBinary, "operation" = 0): Mul_output_0 + Mul_2_output_0 -> {1,192,512}
   uint8 (scale 0.0234652..., offset -154). */
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_Add */
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_Add[] = { "_vector_field_main_blocks_16_convnext_0_Mul_output_0", "_vector_field_main_blocks_16_convnext_0_Mul_2_output_0" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_Add_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0234652906656265f, .offset= -154}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Mask multiply (ElementWiseBinary, "operation" = 13): Add_output_0 * graph input "latent_mask";
   output keeps the Add tensor's quantization (scale 0.0234652..., offset -154). */
static ModelError_t addNode__vector_field_main_blocks_16_convnext_0_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_16_convnext_0_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_16_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_16_convnext_0_Mul_3[] = { "_vector_field_main_blocks_16_convnext_0_Add_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_16_convnext_0_Mul_3_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_16_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_16_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0234652906656265f, .offset= -154}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_16_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_16_convnext_0_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_16_convnext_0_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_16_convnext_0_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_16_convnext_0_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Block-17 entry mask multiply ("operation" = 13). Note the output tensor is named
   "_vector_field_main_blocks_17_Transpose_output_0" even though this node is an
   ElementWiseBinary — the converter reused the downstream ONNX tensor name after folding. */
static ModelError_t addNode__vector_field_main_blocks_17_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_17_Mul */
Qnn_Param_t params__vector_field_main_blocks_17_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_17_Mul[] = { "_vector_field_main_blocks_16_convnext_0_Mul_3_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_17_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_17_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0234652906656265f, .offset= -154}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_17_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_17_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_17_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_17_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Flattens {1,192,512} -> {192,512} so the following FullyConnected can consume it
   (initializer continues on the next file line). */
static ModelError_t addNode__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape */
const char* inputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape[] = { "_vector_field_main_blocks_17_Transpose_output_0" };
uint32_t dimensions__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape[] = {192, 512};
Qnn_Tensor_t
/* Output of the pre-reshape declared on the previous file line: rank-2 uint8 {192,512},
   quantization carried over from the masked block-16 output. */
outputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0234652906656265f, .offset= -154}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static W_query weight matrix (ONNX initializer "onnx__MatMul_3206"): uint8 {256,512},
   scale 0.0148021..., offset -116, payload from the binary blob. */
static ModelError_t addTensor_onnx__MatMul_3206(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_onnx__MatMul_3206[] = {256, 512};
VALIDATE(model.addTensor("onnx__MatMul_3206", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3206", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0148021113127470f, .offset= -116}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3206, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3206), .dataSize=BINLEN(onnx__MatMul_3206)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
/* Static W_query bias: uint8 {256}, scale 0.0046985..., offset -122, payload from the binary blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias[] = {256};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046985330991447f, .offset= -122}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
/* W_query projection lowered to a "FullyConnected" node: activation {192,512} x weight {256,512}
   + bias {256} -> "_..._Add_output_0_fc" {192,256} uint8 (scale 0.0286079..., offset -128). */
static ModelError_t addNode__vector_field_main_blocks_17_attention_W_query_linear_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_17_attention_W_query_linear_MatMul */
const char* inputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul[] = { "_vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape", "onnx__MatMul_3206", "tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias" };
uint32_t dimensions__vector_field_main_blocks_17_attention_W_query_linear_Add_output_0_fc[] = {192, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286079067736864f, .offset= -128}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_17_attention_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_17_attention_W_query_linear_MatMul", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Restores the batch dim after the FC: {192,256} -> {1,192,256} as "_..._Add_output_0". */
static ModelError_t addNode__vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape */
const char* inputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_17_attention_W_query_linear_Add_output_0_fc" };
uint32_t dimensions__vector_field_main_blocks_17_attention_W_query_linear_Add_output_0[] = {1, 192, 256};
Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286079067736864f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_attention_W_query_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Split node: split_index {128} on axis 2 cuts the 256-channel query projection into two
   {1,192,128} halves (initializer continues on the next file line). */
static ModelError_t addNode__vector_field_main_blocks_17_attention_Split(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_17_attention_Split */
uint32_t dimensions__vector_field_main_blocks_17_attention_Split_split_index[] = {1};
uint32_t _vector_field_main_blocks_17_attention_Split_split_index[] = {128};
Qnn_Param_t params__vector_field_main_blocks_17_attention_Split[] =
{ {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_17_attention_Split_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Split[] = { "_vector_field_main_blocks_17_attention_W_query_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Split_output_0[] = {1, 192, 128}; uint32_t dimensions__vector_field_main_blocks_17_attention_Split_output_1[] = {1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Split[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286079067736864f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286079067736864f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Split", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_17_attention_Split, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_17_attention_Split, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Split, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Unsqueeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Unsqueeze */ const char* inputs__vector_field_main_blocks_17_attention_Unsqueeze[] = { "_vector_field_main_blocks_17_attention_Split_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Unsqueeze_output_0[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Unsqueeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Unsqueeze_output_0", .type= 
// Tail of _vector_field_main_blocks_17_attention_Unsqueeze (ONNX Unsqueeze lowered to a
// QNN Reshape): output {1,1,192,128}, same quant encoding as Split_output_0.
// _vector_field_main_blocks_17_attention_Unsqueeze_1 begins on this line and does the
// same rank-4 lift for Split_output_1.
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286079067736864f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Unsqueeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_Unsqueeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Unsqueeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Unsqueeze_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Unsqueeze_1 */ const char* inputs__vector_field_main_blocks_17_attention_Unsqueeze_1[] = { "_vector_field_main_blocks_17_attention_Split_output_1" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Unsqueeze_1_output_0[] = {1, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Unsqueeze_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286079067736864f, .offset= -128}}}, .rank= 4, 
// Tail of Unsqueeze_1 and its addNode call, then _vector_field_main_blocks_17_attention_Concat:
// scalar axis=0 stacks the two {1,1,192,128} unsqueezed halves into {2,1,192,128}
// (leading dim 2 = the two stacked branches; output definition continues on the next line).
.dimensions=dimensions__vector_field_main_blocks_17_attention_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Unsqueeze_1", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_Unsqueeze_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Unsqueeze_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Concat(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Concat */ Qnn_Param_t params__vector_field_main_blocks_17_attention_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Concat[] = { "_vector_field_main_blocks_17_attention_Unsqueeze_output_0", "_vector_field_main_blocks_17_attention_Unsqueeze_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Concat_output_0[] = {2, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286079067736864f, .offset= -128}}}, .rank= 4, 
// Tail of the Concat node (axis=0, 2 inputs -> 1 output {2,1,192,128}), then the static
// tensor _vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw: a quantized
// (scale 0.0078389523550868, offset -127) constant of shape {2,1,128,50} whose bytes
// come from the model binary via BINVARSTART/BINLEN. Presumably a precomputed tanh-key
// table for the attention MatMul below — TODO confirm against the original ONNX graph.
.dimensions=dimensions__vector_field_main_blocks_17_attention_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_17_attention_Concat, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_17_attention_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Concat, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw[] = {2, 1, 128, 50}; VALIDATE(model.addTensor("_vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0078389523550868f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw), .dataSize=BINLEN(_vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
// _vector_field_main_blocks_17_attention_MatMul: batched MatMul (transpose_in0=0,
// transpose_in1=0) of Concat_output_0 {2,1,192,128} with the static tanh tensor
// {2,1,128,50} -> {2,1,192,50} attention logits (scale 0.6630928516387939, offset -114).
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_MatMul */ Qnn_Param_t params__vector_field_main_blocks_17_attention_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_17_attention_MatMul[] = { "_vector_field_main_blocks_17_attention_Concat_output_0", "_vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw" }; uint32_t dimensions__vector_field_main_blocks_17_attention_MatMul_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.6630928516387939f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_17_attention_MatMul, // Node Params 2, // Num Node Params 
// Tail of the attention MatMul addNode call, then _vector_field_main_blocks_17_attention_Div:
// ElementWiseBinary with operation=2 (node named "Div" — the ONNX score-scaling divide),
// dividing the {2,1,192,50} logits by the shared constant
// "_vector_field_main_blocks_3_attn_Constant_39_output_0" (defined elsewhere in this file).
// Output keeps offset -114; scale shrinks to 0.0414433032274246.
inputs__vector_field_main_blocks_17_attention_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Div(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Div */ Qnn_Param_t params__vector_field_main_blocks_17_attention_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Div[] = { "_vector_field_main_blocks_17_attention_MatMul_output_0", "_vector_field_main_blocks_3_attn_Constant_39_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Div_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0414433032274246f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Div", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_17_attention_Div, // Node Params 1, // Num Node Params 
// Tail of the Div addNode call, then _vector_field_main_blocks_17_attention_Softmax:
// axis=3 (the 50-wide key dimension), beta=1.0. Output encoding scale 1/256
// (0.00390625, offset 0) — the standard 8-bit encoding for a [0,1) softmax output.
inputs__vector_field_main_blocks_17_attention_Div, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Div, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Softmax */ Qnn_Param_t params__vector_field_main_blocks_17_attention_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Softmax[] = { "_vector_field_main_blocks_17_attention_Div_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Softmax_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type 
// Tail of the Softmax addNode call, then _vector_field_main_blocks_17_attention_Where:
// ElementWiseSelect(condition, value_if_true, value_if_false) over the softmax weights.
// Condition "_vector_field_main_blocks_23_attention_Cast_output_0" and the replacement
// constant "_vector_field_main_blocks_3_attn_Constant_44_output_0" are produced elsewhere
// in this file — presumably the style-token mask applied to the attention weights; confirm
// against the source ONNX graph. Output {2,1,192,50}, scale 0.0026828853879124, offset 0.
params__vector_field_main_blocks_17_attention_Softmax, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_17_attention_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Where(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Where */ const char* inputs__vector_field_main_blocks_17_attention_Where[] = { "_vector_field_main_blocks_23_attention_Cast_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0", "_vector_field_main_blocks_17_attention_Softmax_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Where_output_0[] = {2, 1, 192, 50}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026828853879124f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_Where, // Input Tensor Names 3, // Num Input Tensor Names 
// Tail of the Where addNode call, then _vector_field_main_blocks_17_attention_MatMul_1:
// second attention MatMul (no transposes) of the masked weights {2,1,192,50} with
// "_vector_field_main_blocks_17_attention_Concat_2_output_0" (produced elsewhere in this
// file) -> {2,1,192,128} context, scale 0.0018044338794425, offset -84.
outputs__vector_field_main_blocks_17_attention_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_MatMul_1 */ Qnn_Param_t params__vector_field_main_blocks_17_attention_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_17_attention_MatMul_1[] = { "_vector_field_main_blocks_17_attention_Where_output_0", "_vector_field_main_blocks_17_attention_Concat_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_MatMul_1_output_0[] = {2, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018044338794425f, .offset= -84}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_17_attention_MatMul_1, // 
// Tail of the MatMul_1 addNode call, then _vector_field_main_blocks_17_attention_Split_3:
// tensor-param "split_index"={1} with scalar axis=0 — undoes the earlier axis-0 stacking,
// splitting {2,1,192,128} back into two {1,1,192,128} branch outputs that keep MatMul_1's
// quant encoding (scale 0.0018044338794425, offset -84).
Node Params 2, // Num Node Params inputs__vector_field_main_blocks_17_attention_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Split_3 */ uint32_t dimensions__vector_field_main_blocks_17_attention_Split_3_split_index[] = {1}; uint32_t _vector_field_main_blocks_17_attention_Split_3_split_index[] = {1}; Qnn_Param_t params__vector_field_main_blocks_17_attention_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_17_attention_Split_3_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Split_3[] = { "_vector_field_main_blocks_17_attention_MatMul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Split_3_output_0[] = {1, 1, 192, 128}; uint32_t dimensions__vector_field_main_blocks_17_attention_Split_3_output_1[] = {1, 1, 
// Both Split_3 output tensor definitions ({1,1,192,128} each) and the Split addNode call.
192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018044338794425f, .offset= -84}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018044338794425f, .offset= -84}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Split_3", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_17_attention_Split_3, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_17_attention_Split_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Split_3, // Output Tensors 
// Tail of the Split_3 addNode call, then _vector_field_main_blocks_17_attention_Concat_3:
// scalar axis=3 re-joins the two {1,1,192,128} halves feature-wise into {1,1,192,256}
// (the channel-merge counterpart of the earlier axis-2 Split at the block's input).
2// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Concat_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Concat_3 */ Qnn_Param_t params__vector_field_main_blocks_17_attention_Concat_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Concat_3[] = { "_vector_field_main_blocks_17_attention_Split_3_output_0", "_vector_field_main_blocks_17_attention_Split_3_output_1" }; uint32_t dimensions__vector_field_main_blocks_17_attention_Concat_3_output_0[] = {1, 1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Concat_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018044338794425f, .offset= -84}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_17_attention_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Concat_3", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_17_attention_Concat_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_17_attention_Concat_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Concat_3, // Output 
// Tail of the Concat_3 addNode call, then _vector_field_main_blocks_17_attention_Squeeze:
// a Reshape collapsing {1,1,192,256} to rank-2 {192,256} so it can feed the
// FullyConnected layer below (output tensor is named *_out_fc_linear_MatMul_pre_reshape).
Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Squeeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Squeeze */ const char* inputs__vector_field_main_blocks_17_attention_Squeeze[] = { "_vector_field_main_blocks_17_attention_Concat_3_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_pre_reshape[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018044338794425f, .offset= -84}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3209(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_onnx__MatMul_3209[] = {512, 256}; VALIDATE(model.addTensor("onnx__MatMul_3209", // Tensor 
// Static tensor onnx__MatMul_3209: quantized out_fc weight matrix {512,256}
// (scale 0.0065257675014436, offset -120), bytes supplied via BINVARSTART/BINLEN.
// Then tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias: quantized
// {512} bias vector (scale 0.0014566580066457, offset -67), also binary-backed.
Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3209", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0065257675014436f, .offset= -120}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3209, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3209), .dataSize=BINLEN(onnx__MatMul_3209)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014566580066457f, .offset= -67}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static 
// _vector_field_main_blocks_17_attention_out_fc_linear_MatMul: FullyConnected node
// (inputs: pre_reshape activation {192,256}, weights onnx__MatMul_3209 {512,256},
// bias {512}) -> {192,512} output (scale 0.0039576287381351, offset -99). The ONNX
// MatMul+Add pair was fused into one FC by the converter, hence the *_Add_output_0_fc name.
ModelError_t addNode__vector_field_main_blocks_17_attention_out_fc_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_out_fc_linear_MatMul */ const char* inputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul[] = { "_vector_field_main_blocks_17_attention_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3209", "tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0_fc[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039576287381351f, .offset= -99}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_out_fc_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
// _vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape: Reshape
// restoring the FC output {192,512} to rank-3 {1,192,512}, same quant encoding.
// The masking Mul node's definition starts at the end of this line.
addNode__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039576287381351f, .offset= -99}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_attention_Mul(QnnModel& model){ ModelError_t err 
// _vector_field_main_blocks_17_attention_Mul: ElementWiseBinary with operation=13
// (node named "Mul"), multiplying the FC output {1,192,512} by the graph input
// "latent_mask" (declared as {1,1,192} in the converter command line — presumably
// broadcast; confirm layout against the converter's axis transforms). Output keeps the
// FC encoding and is named *_Transpose_1_output_0 (converter folded a Transpose here).
= MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_attention_Mul */ Qnn_Param_t params__vector_field_main_blocks_17_attention_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_17_attention_Mul[] = { "_vector_field_main_blocks_17_attention_out_fc_linear_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_17_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_attention_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039576287381351f, .offset= -99}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_attention_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_17_attention_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_17_attention_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_attention_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_Add */ Qnn_Param_t 
// _vector_field_main_blocks_17_Add: ElementWiseBinary with operation=0 (node named "Add"),
// the block's residual add of the masked attention output with
// "_vector_field_main_blocks_17_Transpose_output_0" (the skip branch, produced elsewhere
// in this file) -> {1,192,512}, scale 0.0234645307064056, offset -152. NOTE(review):
// offset -152 is outside the usual [-255,0] asymmetric-uint8 zero-point range for 8-bit —
// emitted by the quantizer as-is; verify if accuracy issues appear around this node.
params__vector_field_main_blocks_17_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_17_Add[] = { "_vector_field_main_blocks_17_Transpose_1_output_0", "_vector_field_main_blocks_17_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_17_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0234645307064056f, .offset= -152}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_17_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_17_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_17_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_17_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_17_norm_norm_weight", // Tensor Name 
// Static tensors for the block-17 norm layer: quantized weight {512}
// (scale 0.0012374194338918, offset 0) and bias {512} (scale 0.0011180669534951,
// offset -94), both binary-backed. The bias addTensor call is truncated here and
// continues past this chunk of the file.
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_17_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012374194338918f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_17_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_17_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_17_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_17_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_17_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_17_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_17_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011180669534951f, .offset= -94}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_17_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_17_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_17_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), 
err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_17_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_17_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_17_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_17_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_17_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_17_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_17_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_17_norm_norm_weight", "tts_ttl_vector_field_main_blocks_17_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_17_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_17_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188554730266333f, .offset= -165}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_17_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_17_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_17_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_17_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_17_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_17_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_17_Mul_1[] = { "_vector_field_main_blocks_17_norm_Transpose_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_17_Mul_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_17_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_17_Mul_1_output_0", .type= 
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188554730266333f, .offset= -165}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_17_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_17_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_17_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_17_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_17_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_Mul */ Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_Mul[] = { "_vector_field_main_blocks_17_Mul_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188554730266333f, .offset= -165}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_pad_amount, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_18_convnext_0_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188554730266333f, .offset= -165}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return 
err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188554730266333f, .offset= -165}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188554730266333f, .offset= -165}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0188554730266333f, .offset= -165}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // 
Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0080123776569963f, .offset= -133}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004867749230471f, .offset= -132}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight", "tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022313657682389f, .offset= -155}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022313657682389f, .offset= -155}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022313657682389f, .offset= -155}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, 
.offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022313657682389f, .offset= -155}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_Mul_1[] = { "_vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022313657682389f, .offset= -155}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_Mul_1, // Input Tensor Names 2, // Num Input 
Tensor Names outputs__vector_field_main_blocks_18_convnext_0_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038156453520060f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018067401833832f, .offset= -160}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 
0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_18_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight", "tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0211411938071251f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization, // Output Tensors 1// Num 
Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf", 
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0211411938071251f, .offset= -133}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0211411938071251f, .offset= -133}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0211411938071251f, .offset= -133}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
params__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0085923960432410f, .offset= -118}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023558763787150f, .offset= -233}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0378313362598419f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0378313362598419f, .offset= -170}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate */ const char* 
inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0378313362598419f, .offset= -170}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_36(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_36 */ Qnn_Param_t params__elementwiseneuron_36[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_36[] = { 
"_vector_field_main_blocks_18_convnext_0_pwconv1_Conv_output_0" };
  // Activation output keeps shape (1,1024,192) but is requantized to a new
  // scale/offset produced by calibration.
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192};
  Qnn_Tensor_t outputs__elementwiseneuron_36[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_act_Mul_1_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0132547132670879f, .offset= -13}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_act_Mul_1_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_elementwiseneuron_36", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseNeuron", // Qnn Node Type
                         params__elementwiseneuron_36, // Node Params
                         1, // Num Node Params
                         inputs__elementwiseneuron_36, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__elementwiseneuron_36, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Reshape (1,1024,192) -> (1,1024,1,192): re-inserts a singleton axis so the
// following pwconv2 can run as a Conv2d.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d */
  const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_0_act_Mul_1_output_0" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0132547132670879f, .offset= -13}}},
      .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose with perm {0,2,3,1} (channel axis moved last, NCHW -> NHWC) so the
// Conv2d below sees channel-last data.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name=
"_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm",
      // Static uint32 perm tensor {0,2,3,1}: 4 elements * 4 bytes = 16.
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
      .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0132547132670879f, .offset= -13}}},
      .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Static weight tensor for pwconv2: (1,1,1024,512) uint8 with asymmetric
// scale/offset; raw bytes come from the model binary blob (BINVARSTART/BINLEN).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
                             .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight",
                             .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0259406995028257f, .offset= -133}}},
                             .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}} ), err);
  return err;
}

// Static bias tensor for pwconv2: 512 uint8 values from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t
dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
                             .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias",
                             .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012865945464000f, .offset= -76}}},
                             .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}} ), err);
  return err;
}

// pwconv2 lowered to a Conv2d over NHWC data, (1,1,192,1024) -> (1,1,192,512):
// stride/dilation {1,1}, zero pad_amount, group=1, plus the converter's
// reuse_sparse_indices=false flag. Inputs are activation, weight, bias.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_dilation",
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
      .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_dilation,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_pad_amount",
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
      .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_pad_amount,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_stride",
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
      .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_stride,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d[] = {
    "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight",
    "tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152177512645721f, .offset= -116}}},
      .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose with perm {0,3,1,2} (channel axis moved to position 1, NHWC ->
// NCHW) on the pwconv2 Conv2d result: (1,1,192,512) -> (1,512,1,192).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw_perm",
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
      .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw_perm,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152177512645721f, .offset= -116}}},
      .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Reshape (1,512,1,192) -> (1,512,192): drops the singleton axis again after
// the Conv2d round-trip.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR
_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate */
  const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152177512645721f, .offset= -116}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose (1,512,192) -> (1,192,512) with perm {0,2,1}: back to feature-last
// layout for the elementwise ops that follow.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc_perm",
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
      .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc_perm,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152177512645721f, .offset= -116}}},
      .rank= 3,
      .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Static tensor "gamma": (1,1,512) uint8 with offset 0, bytes taken from the
// model binary blob (name suggests a ConvNeXt-style per-channel scale --
// inferred from the identifier, not asserted by this file).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_gamma(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_gamma[] = {1, 1, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_0_gamma", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
                             .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_0_gamma",
                             .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012815210502595f, .offset= 0}}},
                             .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_0_gamma,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_0_gamma)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

// ElementWiseBinary, operation id 13 (node name indicates Mul): gamma times
// the pwconv2 output; the (1,1,512) operand broadcasts over (1,192,512).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_Mul_2 */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_0_Mul_2[] = {
    "tts_ttl_vector_field_main_blocks_18_convnext_0_gamma",
    "_vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_Mul_2_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_Mul_2_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036062251310796f, .offset= -103}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_Mul_2_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_Mul_2", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_0_Mul_2, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_Mul_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_Mul_2, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// ElementWiseBinary, operation id 0 (node name indicates Add): combines
// Mul_output_0 with the scaled branch Mul_2_output_0, shape (1,192,512).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_Add */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_0_Add[] = {
    "_vector_field_main_blocks_18_convnext_0_Mul_output_0",
    "_vector_field_main_blocks_18_convnext_0_Mul_2_output_0" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_Add_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_Add_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203804876655340f, .offset= -170}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_Add_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_0_Add, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_Add, // Input Tensor Names
                         2, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_0_Add, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// ElementWiseBinary, operation id 13, on (Add_output_0, latent_mask); node
// name indicates Mul. Presumably zeroes masked positions via the latent_mask
// network input -- TODO confirm against the exported graph. Output reuses the
// Add tensor's quant encoding.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_0_Mul_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_0_Mul_3 */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_0_Mul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_0_Mul_3[] = {
    "_vector_field_main_blocks_18_convnext_0_Add_output_0",
    "latent_mask" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_0_Mul_3_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_0_Mul_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_0_Mul_3_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203804876655340f, .offset= -170}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_0_Mul_3_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_0_Mul_3", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_0_Mul_3, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_0_Mul_3, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_0_Mul_3, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// First node of the convnext_1 sub-block: multiplies the previous output by
// latent_mask again (operation id 13); same shape and quant encoding.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_Mul */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_1_Mul[] = {
    "_vector_field_main_blocks_18_convnext_0_Mul_3_output_0",
    "latent_mask" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_Mul_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_1_Mul_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203804876655340f, .offset= -170}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_Mul_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_Mul, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_Mul, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Pad ahead of convnext_1's depthwise conv: pad_amount is a (3 x 2) table of
// {before, after} per axis = {0,0, 4,4, 0,0}, so axis 1 grows by 8:
// (1,192,512) -> (1,200,512). "scheme" scalar is 3 (see QnnOpDef.h pad
// schemes); the output keeps the input's quant encoding.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Pad */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_pad_amount[] = {3, 2};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 4, 4, 0, 0};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_dwconv_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Pad_pad_amount",
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
      .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_pad_amount,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad[] = { "_vector_field_main_blocks_18_convnext_1_Mul_output_0" };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0[] = {1, 200, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203804876655340f, .offset= -170}}},
      .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Pad", // Node Name
                         "qti.aisw", // Package Name
                         "Pad", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_dwconv_Pad, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad, // Output Tensors
                         1 // Num Output Tensors
                         ), err);
  return err;
}

// Transpose (1,200,512) -> (1,512,200) with perm {0,2,1} ahead of the
// depthwise conv. NOTE: this function continues past this chunk of the file.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
      .id=0, .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf_perm",
      .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
      .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, // 3 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 512, 200};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        // Layout-only op: keeps the producer's quantization encoding.
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0203804876655340f, .offset= -170}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape (1,512,200) -> (1,512,1,200): inserts a unit height axis so the 1-D
// depthwise conv can be expressed as a 2-D conv (1 x 5 kernel below).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d */
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 200};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0203804876655340f, .offset= -170}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose (1,512,1,200) -> (1,1,200,512): NCHW -> NHWC, the layout the
// DepthWiseConv2d node consumes.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 200, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name=
"_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0203804876655340f, .offset= -170}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Registers the static depthwise-conv weight blob (1,5,1,512): a 1x5 kernel per
// each of the 512 channels. Data lives in the companion binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight[] = {1, 5, 1, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding=
{.scale= 0.0074592875316739f, .offset= -118}}},
        .rank= 4,
        .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

// Registers the static depthwise-conv bias vector (512), quantized uint8 with its
// own scale/offset; payload comes from the companion binary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0012381124543026f, .offset= -87}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

// Depthwise conv over the padded sequence: 1x5 kernel, dilation (1,2), stride (1,1),
// no extra padding (the explicit Pad node above already added it). Effective
// receptive field is 9, so width 200 -> 192 as the output dims record.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d */
  uint32_t
dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_dilation[] = {1, 2};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_stride[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_dilation, .dataSize=8}}, // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_pad_amount, .dataSize=16}}, // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d_stride, .dataSize=8}}, // 2 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight",
    "tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0030087754130363f, .offset= -162}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "DepthWiseConv2d", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d, // Node Params
                         3, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose (1,1,192,512) -> (1,512,1,192): NHWC conv result back to NCHW before
// the rank-3 reshape that follows.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, // 4 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0030087754130363f, .offset= -162}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape (1,512,1,192) -> (1,512,192): drops the unit height axis, undoing the
// earlier to-2d reshape and producing the conv's rank-3 output tensor.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate */
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0030087754130363f, .offset= -162}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose (1,512,192) -> (1,192,512): restores the sequence-major layout used by
// the elementwise/LayerNorm chain that follows.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, // 3 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name=
"_vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0030087754130363f, .offset= -162}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Re-applies "latent_mask" to the depthwise-conv output (elementwise multiply).
// The result tensor is named "...norm_Transpose_output_0" because the converter
// folded the original norm-side transpose into this producer.
// NOTE(review): "operation"=13 — same ElementWiseBinary multiply selector as above;
// confirm against QnnOpDef.h.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_Mul_1 */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_Mul_1[] = {
    "_vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_Mul_1[] = {
(Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_norm_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0030087754130363f, .offset= -162}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_Mul_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_Mul_1, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_Mul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_Mul_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Registers the LayerNorm gamma (scale) vector (512); static uint8 data from the
// companion binary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0022866169456393f,
.offset= 0}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

// Registers the LayerNorm beta (bias) vector (512); static uint8 data from the
// companion binary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias", // Tensor Name
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0022022577468306f, .offset= -149}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias),
                       .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  ), err);
  return err;
}

// LayerNorm over the last (feature, size-512) axis of the masked conv output,
// with the gamma/beta tensors above and epsilon 1e-6.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization_axes[] = {2}; // axis 2 == feature axis of (1,192,512)
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization_axes",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization_axes,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization_axes, .dataSize=4}}, // 1 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization[] = {
    "_vector_field_main_blocks_18_convnext_1_norm_Transpose_output_0",
    "tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight",
    "tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0230510439723730f, .offset= -167}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization", // Node Name
                         "qti.aisw", // Package Name
                         "LayerNorm", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose (1,192,512) -> (1,512,192): channel-major layout for the pointwise
// conv (pwconv1) that follows.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, // 3 x uint32
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf[] = {
    "_vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0230510439723730f, .offset= -167}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape (1,512,192) -> (1,512,1,192) feeding pwconv1 as a 2-D conv.
// (Definition continues past this chunk; remainder is unchanged downstream.)
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d */
  const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d[] = {
    "_vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0230510439723730f, .offset= -167}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node
Params inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = 
{1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0230510439723730f, .offset= -167}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0134809287264943f, .offset= -138}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026447118725628f, .offset= -232}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static 
ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight", 
"tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0415937528014183f, .offset= -199}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 
2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0415937528014183f, .offset= -199}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= 
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0415937528014183f, .offset= -199}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_38(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_38 */ Qnn_Param_t params__elementwiseneuron_38[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_38[] = { "_vector_field_main_blocks_18_convnext_1_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_38[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0097463438287377f, .offset= -17}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_elementwiseneuron_38", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_38, // Node Params 1, // Num Node Params inputs__elementwiseneuron_38, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_38, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_1_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0097463438287377f, .offset= -17}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 
192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0097463438287377f, .offset= -17}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0156945921480656f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013196710497141f, .offset= -66}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d */
  // Auto-generated by qnn-onnx-converter -- keep edits mechanical.
  // Conv2d with stride {1,1}, dilation {1,1}, zero padding and group=1 over the
  // NHWC-reshaped activation (the 192-step sequence is laid out as width).
  // Weight/bias are static tensors registered by sibling addTensor_* helpers.
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d[] = {
    // "dilation": 2 x uint32 -> dataSize = 8 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    // "pad_amount": 2x2 x uint32 -> dataSize = 16 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    // "stride": 2 x uint32 -> dataSize = 8 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d[] = {
    "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight",
    "tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias"
  };
  // Output stays NHWC: {N=1, H=1, W=192, C=512}, asymmetric uint8 quantization.
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0156821459531784f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose NHWC {1,1,192,512} -> NCHW {1,512,1,192} via perm {0,3,1,2}
// (layout shim emitted by perform_axes_to_spatial_first_order).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t
params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
    // "perm": 4 x uint32 -> dataSize = 16 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
    "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate"
  };
  // Quantization params are copied through unchanged -- Transpose is data-invariant.
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0156821459531784f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape NCHW {1,512,1,192} -> rank-3 {1,512,192}: drops the unit height axis
// inserted for the 2D conv. Note the output tensor is named *_Conv_output_0
// even though the node is named *_Conv_intermediate (generator convention).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate */
  const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate[] = {
    "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0156821459531784f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose {1,512,192} -> feature-last {1,192,512} via perm {0,2,1}, so the
// following element-wise ops can broadcast against {1,1,512}-shaped operands.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc[] = {
    // "perm": 3 x uint32 -> dataSize = 12 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc[] = {
    "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0156821459531784f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Static weight tensor: ConvNeXt gamma (per-feature scale), shape {1,1,512},
// uint8 with offset 0; payload comes from the companion binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_gamma(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_1_gamma", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012063918402418f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_1_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// gamma * pwconv2 output: {1,1,512} broadcast against {1,192,512}.
// operation=13 -- the node name says Mul; confirm 13 == MULTIPLY against
// the ElementWiseBinary operation enum in QnnOpDef.h.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_Mul_2 */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_Mul_2[] = {
    "tts_ttl_vector_field_main_blocks_18_convnext_1_gamma",
    "_vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_Mul_2_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033595147542655f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_Mul_2", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_Mul_2, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_Mul_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_Mul_2, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Residual add: Mul_output_0 (produced earlier in the file) + the gamma-scaled
// branch above. operation=0 -- the node name says Add; confirm against QnnOpDef.h.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_Add */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_Add[] = {
    "_vector_field_main_blocks_18_convnext_1_Mul_output_0",
    "_vector_field_main_blocks_18_convnext_1_Mul_2_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_Add_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0207403451204300f, .offset= -171}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_Add, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_Add, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_Add, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Mask the block output with the graph input "latent_mask" ({1,1,192},
// broadcast over features). Output quantization is identical to the input's.
// NOTE(review): the very next node (convnext_2_Mul) multiplies by latent_mask
// again -- redundant if the mask is binary, but harmless; generator artifact.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_1_Mul_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_1_Mul_3 */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_1_Mul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_1_Mul_3[] = {
    "_vector_field_main_blocks_18_convnext_1_Add_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_1_Mul_3_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_1_Mul_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_1_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0207403451204300f, .offset= -171}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_1_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_1_Mul_3", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_1_Mul_3, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_1_Mul_3, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_1_Mul_3, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Entry of ConvNeXt block 2: re-apply latent_mask to the previous block's
// output before the depthwise conv (see NOTE above on the duplicate masking).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_Mul */
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_2_Mul[] = {
    "_vector_field_main_blocks_18_convnext_1_Mul_3_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_Mul_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0207403451204300f, .offset= -171}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_2_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_2_Mul, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_2_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_2_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Explicit Pad before the depthwise conv: pad_amount {{0,0},{8,8},{0,0}} grows
// the sequence axis 192 -> 208 (8 each side). scheme=3 selects the pad scheme;
// confirm the enum value against the Pad op definition in QnnOpDef.h.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_dwconv_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Pad */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_pad_amount[] = {3, 2};
  uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 8, 8, 0, 0};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_dwconv_Pad[] = {
    // "pad_amount": 3x2 x uint32 -> dataSize = 24 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad[] = {
    "_vector_field_main_blocks_18_convnext_2_Mul_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0[] = {1, 208, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0207403451204300f, .offset= -171}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_2_dwconv_Pad", // Node Name
                         "qti.aisw", // Package Name
                         "Pad", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_2_dwconv_Pad, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose {1,208,512} -> channel-first {1,512,208} via perm {0,2,1}, ahead of
// the reshape/NHWC shim that feeds the depthwise conv.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf[] = {
    // "perm": 3 x uint32 -> dataSize = 12 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf[] = {
    "_vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 512, 208};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0207403451204300f, .offset= -171}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape {1,512,208} -> rank-4 {1,512,1,208}: inserts the unit height axis so
// the 1-D depthwise conv can run as a 2D op.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d */
  const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d[] = {
    "_vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 208};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0207403451204300f, .offset= -171}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose NCHW {1,512,1,208} -> NHWC {1,1,208,512} via perm {0,2,3,1}, the
// layout consumed by the DepthWiseConv2d node below.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {
    // "perm": 4 x uint32 -> dataSize = 16 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 208, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0207403451204300f, .offset= -171}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Static depthwise filter {1,5,1,512}: 1x5 kernel per channel (512 channels),
// uint8 weights streamed from the companion binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight[] = {1, 5, 1, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0051994924433529f, .offset= -116}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Static depthwise bias, one uint8 value per channel ({512}).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007022750214674f, .offset= -138}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// DepthWiseConv2d over the padded sequence: 1x5 kernel, dilation {1,4},
// stride {1,1}, no implicit padding. Effective kernel extent along width is
// (5-1)*4 + 1 = 17, so output width = 208 - 17 + 1 = 192 -- matches the
// declared output dims {1,1,192,512}.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d */
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_dilation[] = {1, 4};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_stride[] = {2};
  uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d[] = {
    // "dilation": 2 x uint32 -> dataSize = 8 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    // "pad_amount": 2x2 x uint32 -> dataSize = 16 bytes (padding done by the Pad node above).
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    // "stride": 2 x uint32 -> dataSize = 8 bytes.
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d[] = {
    "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight",
    "tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026728485245258f, .offset= -120}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "DepthWiseConv2d", // Qnn Node Type
                         params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d, // Node Params
                         3, // Num Node Params
                         inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

static ModelError_t
addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026728485245258f, .offset= -120}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026728485245258f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, 
.offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026728485245258f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_Mul_1[] = { "_vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026728485245258f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_Mul_1, // Input Tensor Names 2, // Num Input 
Tensor Names outputs__vector_field_main_blocks_18_convnext_2_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027269162237644f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024209006223828f, .offset= -71}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 
0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_18_convnext_2_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight", "tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0345285087823868f, .offset= -74}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization, // Output Tensors 1// Num 
Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf", 
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0345285087823868f, .offset= -74}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0345285087823868f, .offset= -74}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0345285087823868f, .offset= -74}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
params__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0156131256371737f, .offset= -122}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { 
/* [generated] qnn-onnx-converter output — ConvNeXt block 18, pwconv1 stage.
 * addTensor_* functions register a static tensor backed by BINVARSTART/BINLEN blob data;
 * addNode_* functions register exactly one QNN op. Do not hand-edit; regenerate with the
 * converter (see the command line recorded in the file header). */
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022532923612744f, .offset= -232}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
/* Conv2d parameters (arrays above): dilation {1,1}, pad_amount {0,0,0,0}, stride {1,1},
 * group=1 — no spatial movement, so this op is a channel projection ("pwconv" name
 * suggests a 1x1 kernel; confirm in the weight tensor's dimensions, declared elsewhere). */
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight", "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0546297058463097f, .offset= -209}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
/* Conv output is NHWC {1,1,192,1024}; the Transpose declared next (perm {0,3,1,2})
 * restores the channel-first layout for the rest of the block. */
"_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char*
/* [generated] pwconv1 epilogue: Transpose NHWC->NCHW ({1,1,192,1024} -> {1,1024,1,192}),
 * Reshape to rank-3 {1,1024,192}, then ElementWiseNeuron with operation=1 (the converter's
 * activation lowering; the exact neuron kind is defined by the operation enum in
 * QnnOpDef.h — confirm there). All intermediates share the producer's u8 scale/offset
 * encoding, since layout ops do not change values. */
inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0546297058463097f, .offset= -209}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate */ const char*
inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0546297058463097f, .offset= -209}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_40(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_40 */ Qnn_Param_t params__elementwiseneuron_40[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_40[] = {
/* Activation output keeps rank 3 {1,1024,192} with a fresh (narrower) quant encoding;
 * the pwconv2 path that follows reshapes it back to rank 4 so Conv2d can consume it. */
"_vector_field_main_blocks_18_convnext_2_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_40[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103976670652628f, .offset= -16}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_40", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_40, // Node Params 1, // Num Node Params inputs__elementwiseneuron_40, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_40, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_2_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version=
/* [generated] pwconv2 prologue: Reshape {1,1024,192} -> {1,1024,1,192}, Transpose to
 * NHWC {1,1,192,1024} (perm {0,2,3,1}), then the static 1x1 weight tensor
 * {1,1,1024,512} — 1024 in-channels projected to 512 out-channels, u8 scale/offset
 * quantized, data supplied by the BINVARSTART/BINLEN blob of the same name. */
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103976670652628f, .offset= -16}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103976670652628f, .offset= -16}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0100775966420770f, .offset= -135}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t
/* [generated] pwconv2 stage: static bias tensor {512}, then the Conv2d node itself —
 * dilation {1,1}, pad_amount {0,0,0,0}, stride {1,1}, group=1 over a 1x1 kernel
 * (weight {1,1,1024,512}), producing NHWC {1,1,192,512}. Same point-wise pattern as
 * the earlier pwconv1 node. */
dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009716211352497f, .offset= -80}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t)
{ .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight", "tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164929535239935f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
/* [generated] pwconv2 epilogue: Transpose back to NCHW {1,512,1,192} (perm {0,3,1,2}),
 * Reshape to rank-3 {1,512,192}, then Transpose perm {0,2,1} to the feature-last
 * layout {1,192,512}; finally the static gamma tensor {1,1,512} (presumably ConvNeXt
 * LayerScale weights — note its quant offset is 0, i.e. non-negative values). */
.numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164929535239935f, .offset= -137}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164929535239935f, .offset= -137}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc */ uint32_t
dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164929535239935f, .offset= -137}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_2_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_2_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014381660148501f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_2_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_2_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_2_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_Mul_2[] = { "tts_ttl_vector_field_main_blocks_18_convnext_2_gamma", "_vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039680595509708f, .offset= -153}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names 
outputs__vector_field_main_blocks_18_convnext_2_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_Add */ Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_2_Add[] = { "_vector_field_main_blocks_18_convnext_2_Mul_output_0", "_vector_field_main_blocks_18_convnext_2_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_2_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_2_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0213935654610395f, .offset= -173}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_2_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_2_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_18_convnext_2_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_2_Add, // Input Tensor Names 2, // Num Input Tensor Names 
outputs__vector_field_main_blocks_18_convnext_2_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node: masks the block-2 residual output with the
// "latent_mask" graph input (operation code 13 — presumably MULTIPLY, per the
// node name "Mul_3"; confirm against QnnOpDef). The mask {1,1,192} broadcasts
// against the {1,192,512} activation — note the mask's trailing axis lines up
// with the 192-length axis after the converter's layout transforms.
// Output reuses the input's quantization (scale=0.0213935654610395, offset=-173),
// as expected for a 0/1 mask multiply.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_2_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_2_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_18_convnext_2_Mul_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_2_Mul_3[] = {
"_vector_field_main_blocks_18_convnext_2_Add_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_2_Mul_3_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_2_Mul_3[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_2_Mul_3_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0213935654610395f, .offset= -173}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_2_Mul_3_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_2_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_2_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_2_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_2_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node: entry multiply of ConvNeXt sub-block 3 — applies
// "latent_mask" again to the (already masked) Mul_3 output. This looks
// redundant, but it mirrors the original ONNX graph structure (each sub-block
// masks its own input), so it is preserved by the converter. Same operation
// code 13 and same quantization as its input.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_Mul */
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_Mul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_Mul[] = {
"_vector_field_main_blocks_18_convnext_2_Mul_3_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_Mul[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_Mul_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0213935654610395f, .offset= -173}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_Mul_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Pad node feeding the depthwise conv of sub-block 3. pad_amount is a
// {3, 2} uint32 tensor {0,0, 16,16, 0,0}: pads 16 elements before and after
// the middle (sequence) axis, {1,192,512} -> {1,224,512}. The 16+16 halo
// matches the dilated 1x5 kernel below (effective extent 33, i.e. 2*16+1).
// scheme code 3 selects the pad scheme (see QnnOpDef Pad "scheme" enum —
// TODO confirm which scheme 3 maps to in this SDK version).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Pad */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 16, 16, 0, 0};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_dwconv_Pad[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Pad_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Pad_pad_amount,
.dataSize=24}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="scheme",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad[] = {
"_vector_field_main_blocks_18_convnext_3_Mul_output_0"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0[] = {1, 224, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
// --- Continuation of the dwconv_Pad output tensor descriptor
// (same quantization as the Pad input: scale=0.0213935654610395, offset=-173,
// as expected — Pad does not change value range).
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0213935654610395f, .offset= -173}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: {1, 224, 512} -> {1, 512, 224} via perm {0, 2, 1}
// ("_ncf" = batch, channels, features). Moves channels ahead of the sequence
// axis so the padded activation can be reshaped into the conv's NCHW-style
// layout below.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf_perm,
.dataSize=12}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 512, 224};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0213935654610395f, .offset= -173}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node: {1, 512, 224} -> {1, 512, 1, 224}. Inserts a dummy height
// axis so the 1-D depthwise convolution can run as a 2-D DepthWiseConv2d.
// Reshape takes no params (nullptr / 0).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 224};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0213935654610395f, .offset= -173}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: {1, 512, 1, 224} -> {1, 1, 224, 512} via perm {0, 2, 3, 1}
// (NCHW -> NHWC), since the QNN DepthWiseConv2d op consumes NHWC activations.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 224, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name=
// --- Continuation of the reshape_to_2d_nhwc output tensor descriptor and
// node registration (definition starts just above).
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0213935654610395f, .offset= -173}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static depthwise-conv weight {1, 5, 1, 512}: a 1x5 kernel per
// each of the 512 channels, uint8 (scale=0.0038124984130263, offset=-156),
// loaded from the model binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0038124984130263f, .offset= -156}}},
.rank= 4,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Registers the static depthwise-conv bias {512}, uint8
// (scale=0.0013295882381499, offset=-102), from the model binary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0013295882381499f, .offset= -102}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// DepthWiseConv2d node (params portion): dilation {1, 8}, pad_amount
// {0,0, 0,0} (padding was done explicitly by the Pad node above), stride
// {1, 1}. With the 1x5 kernel and width-dilation 8 the effective kernel
// extent is (5-1)*8 + 1 = 33, so width 224 -> 224 - 33 + 1 = 192.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_dilation[] = {1, 8};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_dilation",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_dilation,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_dilation,
.dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_pad_amount,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="stride",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_stride",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_stride,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d_stride,
.dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight",
"tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
// --- Continuation of the DepthWiseConv2d output tensor descriptor and node
// registration. Conv output is {1, 1, 192, 512} (NHWC) uint8
// (scale=0.0031503697391599, offset=-160).
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0031503697391599f, .offset= -160}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: {1, 1, 192, 512} -> {1, 512, 1, 192} via perm {0, 3, 1, 2}
// (NHWC -> NCHW), undoing the layout change made before the conv.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw_perm,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0031503697391599f, .offset= -160}}},
.rank= 4,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node: {1, 512, 1, 192} -> {1, 512, 192}. Drops the dummy height
// axis introduced for the 2-D conv, producing the 1-D conv result
// "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0".
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate */
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0031503697391599f, .offset= -160}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: {1, 512, 192} -> {1, 192, 512} via perm {0, 2, 1}
// ("_nfc" = batch, features, channels) — back to the channels-last layout
// used by the elementwise/norm ops in this block.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc_perm,
.dataSize=12}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0031503697391599f, .offset= -160}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node: masks the depthwise-conv output with "latent_mask"
// (operation code 13 — presumably MULTIPLY; confirm against QnnOpDef). The
// converter has fused/renamed the result directly to
// "_vector_field_main_blocks_18_convnext_3_norm_Transpose_output_0", the
// tensor consumed by the LayerNormalization that follows.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_Mul_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_18_convnext_3_Mul_1[] = {
"_vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_Mul_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_18_convnext_3_norm_Transpose_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0031503697391599f, .offset= -160}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static LayerNorm scale (weight) tensor {512}, uint8
// (scale=0.0022995679173619, offset=0), from the model binary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0022995679173619f,
.offset= 0}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Registers the static LayerNorm bias tensor {512}, uint8
// (scale=0.0025334903039038, offset=-194), from the model binary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0025334903039038f, .offset= -194}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// --- LayerNormalization node for sub-block 3 (definition continues past
// this chunk; body follows in the file).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_18_convnext_3_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight", "tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0349919125437737f, .offset= -195}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0349919125437737f, .offset= -195}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type 
params__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0349919125437737f, .offset= -195}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node 
Params inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = 
{1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0349919125437737f, .offset= -195}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0173088163137436f, .offset= -86}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022515144664794f, .offset= -230}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight", "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias" }; 
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481095500290394f, .offset= -188}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t 
params__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481095500290394f, .offset= -188}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate */ const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0481095500290394f, .offset= -188}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_42(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_42 */ Qnn_Param_t params__elementwiseneuron_42[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_42[] = { "_vector_field_main_blocks_18_convnext_3_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_18_convnext_3_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_42[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0132078593596816f, .offset= -13}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
// ============================================================================
// NOTE(review): Machine-generated QNN model code (qnn-onnx-converter output —
// see the command line in the file header).  Do NOT hand-edit graph logic
// here; regenerate from the source ONNX model instead.  This region builds:
//   * ConvNeXt block 18 "pwconv2": a pointwise (1x1) conv lowered to Conv2d
//     via rank-3 -> rank-4 Reshape and NCHW<->NHWC Transposes,
//   * the block's layer-scale multiply (gamma) and residual Add,
//   * latent_mask multiplies between blocks 18/19/20,
//   * the edge-padding that precedes block 20's depthwise conv.
// Comments are review annotations only; all code tokens are unchanged.
// ============================================================================
"_elementwiseneuron_42", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_42, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_42, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_42, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Reshape the rank-3 activation (from ..._act_Mul_1) to rank-4 {1,1024,1,192}
// so the 1x1 pointwise conv can be expressed as Conv2d.  Quant encoding
// (scale/offset) is carried through unchanged — Reshape does not requantize.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_18_convnext_3_act_Mul_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0132078593596816f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Transpose NCHW -> NHWC (perm {0,2,3,1}) giving {1,1,192,1024}; QNN Conv2d
// consumes NHWC.  perm is a 4-element uint32 static tensor (dataSize 16).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0132078593596816f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Static uint8 weight tensor {1,1,1024,512} (HWIO for a 1x1 kernel: 1024 in,
// 512 out channels); data comes from the linked binary blob via
// BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0181757397949696f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err); return err; }

// Static uint8 bias tensor {512} for the same conv, also sourced from the
// binary blob.  (bias_bitwidth=8 per the converter command line.)
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010646887822077f, .offset= -81}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err); return err; }

// The 1x1 Conv2d itself: stride {1,1}, dilation {1,1}, zero padding, group=1,
// mapping NHWC {1,1,192,1024} -> {1,1,192,512}.  Inputs are (activation,
// weight, bias); 5 params, 3 inputs, 1 output.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight", "tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0168525185436010f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Transpose conv output NHWC -> NCHW (perm {0,3,1,2}) giving {1,512,1,192}.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0168525185436010f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Reshape back to rank-3 {1,512,192}, producing the ONNX Conv node's output
// tensor _..._pwconv2_Conv_output_0.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate */
const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0168525185436010f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Transpose {1,512,192} -> {1,192,512} (perm {0,2,1}) into feature-last
// ("nfc") layout for the elementwise ops that follow.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0168525185436010f, .offset= -125}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Static layer-scale parameter gamma {1,1,512} (ConvNeXt per-channel scale),
// uint8 with offset 0 (non-negative values).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_gamma[] = {1, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_18_convnext_3_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_18_convnext_3_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029803114011884f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_18_convnext_3_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_18_convnext_3_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_18_convnext_3_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err); return err; }

// gamma * conv_output (ElementWiseBinary, operation=13; per the converter's
// binary-op table this is multiply — confirm against QnnOpDef.h).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_Mul_2 */
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_Mul_2[] = { "tts_ttl_vector_field_main_blocks_18_convnext_3_gamma", "_vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_Mul_2_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074890148825943f, .offset= -100}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_Mul_2, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Residual add (operation=0 — add): block input branch (Mul_output_0)
// + scaled conv branch (Mul_2_output_0).
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_Add */
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_Add[] = { "_vector_field_main_blocks_18_convnext_3_Mul_output_0", "_vector_field_main_blocks_18_convnext_3_Mul_2_output_0" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_Add_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0220506638288498f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Mask the residual output with the graph input "latent_mask" (multiply).
// NOTE(review): latent_mask enters the graph as {1,1,192} while this tensor
// is {1,192,512}; broadcasting here presumably relies on a layout adjustment
// made upstream by the converter — verify against the full graph.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_Mul_3[] = { "_vector_field_main_blocks_18_convnext_3_Add_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_Mul_3_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0220506638288498f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_Mul_3, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Transpose back to channel-first {1,512,192} ("ncf") for block 19's add.
static ModelError_t addNode__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf[] = { "_vector_field_main_blocks_18_convnext_3_Mul_3_output_0" };
uint32_t dimensions__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0220506638288498f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Block 19 skip-connection add: block 18 output + _..._19_Transpose_output_0
// (the second operand is produced elsewhere in the file).
static ModelError_t addNode__vector_field_main_blocks_19_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_19_Add */
Qnn_Param_t params__vector_field_main_blocks_19_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__vector_field_main_blocks_19_Add[] = { "_vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf", "_vector_field_main_blocks_19_Transpose_output_0" };
uint32_t dimensions__vector_field_main_blocks_19_Add_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_19_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_19_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_19_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_19_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_19_Add, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_19_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_19_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Transpose block 19 sum to feature-last {1,192,512} for masking.
static ModelError_t addNode__vector_field_main_blocks_19_Add_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_19_Add_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_19_Add_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_19_Add_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_19_Add_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_19_Add_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_19_Add_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_19_Add_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_19_Add_output_0_nfc[] = { "_vector_field_main_blocks_19_Add_output_0" };
uint32_t dimensions__vector_field_main_blocks_19_Add_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_19_Add_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_19_Add_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_19_Add_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_19_Add_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_19_Add_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_19_Add_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_19_Add_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Block 19 latent_mask multiply (same encoding as its input — mask is 0/1,
// so the quant params pass through).
static ModelError_t addNode__vector_field_main_blocks_19_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_19_Mul */
Qnn_Param_t params__vector_field_main_blocks_19_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_19_Mul[] = { "_vector_field_main_blocks_19_Add_output_0_nfc", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_19_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_19_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_19_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_19_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_19_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_19_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_19_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_19_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Block 20 ConvNeXt entry mask multiply (again by latent_mask; the repeated
// masking mirrors the ONNX graph rather than being fused by the converter).
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_Mul */
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_Mul[] = { "_vector_field_main_blocks_19_Mul_output_0", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_Mul, // Output Tensors
1// Num Output Tensors
), err); return err; }

// Pad 2 elements on each side of the middle (sequence) axis, 192 -> 196,
// ahead of block 20's depthwise conv.  scheme=3 (per QnnOpDef's Pad scheme
// enum this is edge replication — confirm against QnnOpDef.h); pad_amount is
// a {3,2} tensor: {before,after} per axis = {0,0},{2,2},{0,0}.
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Pad */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_20_convnext_0_Mul_output_0" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err); return err; }

// (function continues past this chunk — Transpose of the padded tensor back
// to channel-first layout for the depthwise conv)
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d", // Node 
Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t 
dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0248759854584932f, .offset= -180}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032306283246726f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012077529681846f, .offset= -73}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, 
.isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d */ uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight", "tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias" }; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066726463846862f, .offset= -176}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066726463846862f, .offset= -176}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate */ const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066726463846862f, .offset= -176}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate", 
// Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0" }; uint32_t 
dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0066726463846862f, .offset= -176}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_20_convnext_0_Mul_1[] = { 
"_vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0041472180746496f, .offset= -128}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_20_convnext_0_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_20_convnext_0_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_20_convnext_0_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_20_convnext_0_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044431793503463f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028722819406539f, .offset= -75}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 
0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_20_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight", "tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias" }; uint32_t 
dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512};
/* LayerNorm output: a native (runtime-allocated, clientBuf null) uint8
 * activation of shape 1x192x512 sharing the layer's output quantization. */
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0389839000999928f, .offset= -85}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Adds a Transpose with perm {0, 2, 1}: moves the 1x192x512 LayerNorm output
 * back to channel-first layout 1x512x192 ("ncf" suffix). The perm is passed as
 * a static rank-1 uint32 tensor of 3 elements (12 bytes). */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t
params__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0389839000999928f, .offset= -85}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Rank-3 -> rank-4 Reshape (no params): 1x512x192 becomes 1x512x1x192 so the
 * following 1x1 "pwconv1" can be expressed as a Conv2d with height 1. */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0389839000999928f, .offset= -85}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Transpose perm {0, 2, 3, 1}: 1x512x1x192 (NCHW) -> 1x1x192x512 (NHWC), the
 * layout the Conv2d node below consumes. Perm is a 4-element uint32 tensor. */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
/* NHWC output 1x1x192x512; quantization carried over unchanged from the input
 * (Transpose is layout-only). */
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0389839000999928f, .offset= -85}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Registers the pwconv1 weight: static uint8 tensor of dims {1, 1, 512, 1024}
 * (1x1 kernel, 512 input channels, 1024 output channels), scale/offset
 * quantized, data embedded via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t
dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0106871845200658f, .offset= -133}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Registers the pwconv1 bias: static rank-1 uint8 tensor of 1024 values,
 * scale/offset quantized, data embedded via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022932097781450f, .offset= -224}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Adds the pwconv1 Conv2d: dilation {1, 1}, zero padding, stride {1, 1},
 * group 1 — i.e. a plain 1x1 convolution over the NHWC 1x1x192x512 input,
 * producing 1x1x192x1024. Inputs: activation, static weight, static bias. */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR,
.name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0306320600211620f, .offset= -205}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Transposes the pwconv1 Conv2d result from NHWC back to NCHW (perm {0, 3, 1, 2}). */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
/* NCHW output 1x1024x1x192, same quantization as the conv result. */
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0306320600211620f, .offset= -205}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Rank-4 -> rank-3 Reshape (no params): squeezes 1x1024x1x192 back to
 * 1x1024x192, yielding the ONNX-named pwconv1 Conv output. */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0306320600211620f, .offset= -205}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Adds an ElementWiseNeuron op (operation code 1) on the pwconv1 output; the
 * output tensor is the ONNX "_act_Mul_1_output_0", i.e. the ConvNeXt block's
 * activation. NOTE(review): the meaning of operation=1 comes from QNN's
 * ElementWiseNeuron operation enum — presumably the block's nonlinearity
 * (GELU per the ONNX "act" naming); confirm against the QNN op definition. */
static ModelError_t addNode__elementwiseneuron_44(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_44 */
Qnn_Param_t params__elementwiseneuron_44[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_44[] = { "_vector_field_main_blocks_20_convnext_0_pwconv1_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_44[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0062973308376968f, .offset= -27}}}, .rank= 3,
.dimensions=dimensions__vector_field_main_blocks_20_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_44", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_44, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_44, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_44, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Rank-3 -> rank-4 Reshape (no params): 1x1024x192 becomes 1x1024x1x192 so the
 * 1x1 "pwconv2" can run as a height-1 Conv2d, mirroring the pwconv1 plumbing. */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_20_convnext_0_act_Mul_1_output_0" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0062973308376968f, .offset= -27}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Transpose perm {0, 2, 3, 1}: 1x1024x1x192 (NCHW) -> 1x1x192x1024 (NHWC) for
 * the pwconv2 Conv2d. Perm is a static 4-element uint32 tensor. */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0062973308376968f, .offset= -27}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Registers the pwconv2 weight: static uint8 tensor of dims {1, 1, 1024, 512}
 * (1x1 kernel, 1024 input channels, 512 output channels), data embedded
 * via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight(QnnModel& model){
ModelError_t err =
MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0260726511478424f, .offset= -117}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Registers the pwconv2 bias: static rank-1 uint8 tensor of 512 values,
 * scale/offset quantized, data embedded via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012435544049367f, .offset= -77}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
/* Adds the pwconv2 Conv2d: dilation {1, 1}, zero padding, stride {1, 1},
 * group 1 — a plain 1x1 convolution mapping the NHWC 1x1x192x1024 activation
 * down to 1x1x192x512. Inputs: activation, static weight, static bias. */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR,
.name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias" };
uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203672572970390f, .offset= -68}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
/* Transposes the pwconv2 Conv2d result back to NCHW (continues past this chunk). */
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw */
  // Static rank-1 uint32 tensor holding the permutation {0,3,1,2} (NHWC -> NCHW).
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
    "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203672572970390f, .offset= -68}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds a Reshape that drops the singleton spatial axis: {1,512,1,192} -> {1,512,192},
// restoring the original rank-3 (N,C,L) activation shape after the 2D conv detour.
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate */
  const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate[] = {
    "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203672572970390f, .offset= -68}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds a Transpose (perm {0,2,1}) moving the feature axis last:
// {1,512,192} -> {1,192,512}, i.e. channel-last layout for the following
// elementwise ops against the {1,1,512} gamma tensor.
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc */
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc[] = {
    "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0203672572970390f, .offset= -68}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Registers the static gamma tensor of blocks_20.convnext[0]: shape {1,1,512},
// asymmetric u8 (scale 0.00376..., offset 0), data read from the weight binary via
// BINVARSTART/BINLEN. Used as the per-feature multiplier in Mul_2 below
// (ConvNeXt-style layer scale, judging by the name -- TODO confirm against the ONNX graph).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_gamma(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_gamma[] = {1, 1, 512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_20_convnext_0_gamma", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_20_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037623578682542f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_20_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_20_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_20_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Adds an ElementWiseBinary node for the ONNX Mul: gamma {1,1,512} broadcast-multiplied
// with the pwconv2 output {1,192,512}. operation=13 is the converter's multiply code
// (contrast with operation=0 used by the "_Add" node below).
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_Mul_2 */
  Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_20_convnext_0_Mul_2[] = {
    "tts_ttl_vector_field_main_blocks_20_convnext_0_gamma",
    "_vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc"
  };
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_Mul_2_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0128127075731754f, .offset= -73}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_20_convnext_0_Mul_2", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_20_convnext_0_Mul_2, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_20_convnext_0_Mul_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_20_convnext_0_Mul_2, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds the residual-style elementwise Add (operation=0): Mul_output_0 + Mul_2_output_0,
// both {1,192,512} u8; output requantized to scale 0.02938..., offset -162.
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_Add */
  Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_20_convnext_0_Add[] = {
    "_vector_field_main_blocks_20_convnext_0_Mul_output_0",
    "_vector_field_main_blocks_20_convnext_0_Mul_2_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_Add_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_Add[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293873120099306f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_20_convnext_0_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_20_convnext_0_Add, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_20_convnext_0_Add, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_20_convnext_0_Add, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds an elementwise multiply (operation=13) applying the graph input "latent_mask"
// to the block-20 output; quantization params are identical to the input's, so this is
// effectively a masking pass-through.
static ModelError_t addNode__vector_field_main_blocks_20_convnext_0_Mul_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_20_convnext_0_Mul_3 */
  Qnn_Param_t params__vector_field_main_blocks_20_convnext_0_Mul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_20_convnext_0_Mul_3[] = {
    "_vector_field_main_blocks_20_convnext_0_Add_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_20_convnext_0_Mul_3_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_20_convnext_0_Mul_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_20_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293873120099306f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_20_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_20_convnext_0_Mul_3", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_20_convnext_0_Mul_3, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_20_convnext_0_Mul_3, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_20_convnext_0_Mul_3, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds the entry multiply of block 21 (operation=13): re-applies "latent_mask" to the
// masked block-20 output. The result is named "..._21_Transpose_output_0" because the
// converter folded the adjacent ONNX Transpose into this node's output.
static ModelError_t addNode__vector_field_main_blocks_21_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_21_Mul */
  Qnn_Param_t params__vector_field_main_blocks_21_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_21_Mul[] = {
    "_vector_field_main_blocks_20_convnext_0_Mul_3_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_21_Transpose_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293873120099306f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_21_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_21_Mul, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_21_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_21_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds the Reshape that flattens the batch axis before the attention query projection:
// {1,192,512} -> {192,512}, the rank-2 shape FullyConnected expects.
static ModelError_t addNode__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape */
  const char* inputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape[] = {
    "_vector_field_main_blocks_21_Transpose_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape[] = {192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0293873120099306f, .offset= -162}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Registers the static query-projection weight (ONNX initializer "onnx::MatMul_3236"):
// {256,512} u8, i.e. stored as [out_features, in_features] for FullyConnected.
static ModelError_t addTensor_onnx__MatMul_3236(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3236[] = {256, 512};
  VALIDATE(model.addTensor("onnx__MatMul_3236", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3236", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0094587914645672f, .offset= -134}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3236, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3236), .dataSize=BINLEN(onnx__MatMul_3236)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Registers the static query-projection bias: {256} u8, data from the weight binary.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias[] = {256};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052975779399276f, .offset= -116}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Adds the query projection as a FullyConnected node (ONNX MatMul+Add folded):
// {192,512} x {256,512}^T + bias{256} -> {192,256} u8 (scale 0.07452..., offset -115).
static ModelError_t addNode__vector_field_main_blocks_21_attn_W_query_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_query_linear_MatMul */
  const char* inputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul[] = {
    "_vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape",
    "onnx__MatMul_3236",
    "tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_W_query_linear_Add_output_0_fc[] = {192, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_21_attn_W_query_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds the Reshape restoring the batch axis after the FC: {192,256} -> {1,192,256}.
static ModelError_t addNode__vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_21_attn_W_query_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_W_query_linear_Add_output_0[] = {1, 192, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_W_query_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Adds a Split on axis 2 of the {1,192,256} query projection at indices {64,128,192},
// yielding four {1,192,64} tensors that share the input's quantization params
// (presumably 4 attention heads of dim 64 -- TODO confirm against the ONNX graph).
static ModelError_t addNode__vector_field_main_blocks_21_attn_Split(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Split */
  uint32_t dimensions__vector_field_main_blocks_21_attn_Split_split_index[] = {3};
  uint32_t _vector_field_main_blocks_21_attn_Split_split_index[] = {64, 128, 192};
  Qnn_Param_t params__vector_field_main_blocks_21_attn_Split[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Split_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
  };
  const char* inputs__vector_field_main_blocks_21_attn_Split[] = {
    "_vector_field_main_blocks_21_attn_W_query_linear_Add_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_21_attn_Split_output_0[] = {1, 192, 64};
  uint32_t dimensions__vector_field_main_blocks_21_attn_Split_output_1[] = {1, 192, 64};
  uint32_t dimensions__vector_field_main_blocks_21_attn_Split_output_2[] = {1, 192, 64};
  uint32_t dimensions__vector_field_main_blocks_21_attn_Split_output_3[] = {1, 192, 64};
  Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Split[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_21_attn_Split", // Node Name
                         "qti.aisw", // Package Name
                         "Split", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Split, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_21_attn_Split, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Split, // Output Tensors 4// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze */ const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze[] = { "_vector_field_main_blocks_21_attn_Split_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_output_0[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Unsqueeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Unsqueeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Unsqueeze, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* Same Unsqueeze-as-Reshape pattern as above, applied to Split outputs 1..3
   (1x192x64 -> 1x1x192x64 each, encoding passed through). */
addNode__vector_field_main_blocks_21_attn_Unsqueeze_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_1 */
const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_1[] = {
"_vector_field_main_blocks_21_attn_Split_output_1"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_1_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_1[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Unsqueeze_1_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_1_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Unsqueeze_1", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Unsqueeze_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Unsqueeze_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Unsqueeze #2: reshapes Split output 2. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_2 */
const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_2[] = {
"_vector_field_main_blocks_21_attn_Split_output_2"
};
uint32_t
dimensions__vector_field_main_blocks_21_attn_Unsqueeze_2_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_2[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Unsqueeze_2_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_2_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Unsqueeze_2", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Unsqueeze_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Unsqueeze_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Unsqueeze #3: reshapes Split output 3. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Unsqueeze_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Unsqueeze_3 */
const char* inputs__vector_field_main_blocks_21_attn_Unsqueeze_3[] = {
"_vector_field_main_blocks_21_attn_Split_output_3"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Unsqueeze_3_output_0[] = {1, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Unsqueeze_3[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Unsqueeze_3_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Unsqueeze_3_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Unsqueeze_3", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Unsqueeze_3, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Unsqueeze_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Stacks the four unsqueezed 1x1x192x64 tensors along axis 0 into a single
   4x1x192x64 tensor (continues into the next chunk). */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Concat(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Concat */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Concat[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Concat[] = {
"_vector_field_main_blocks_21_attn_Unsqueeze_output_0",
"_vector_field_main_blocks_21_attn_Unsqueeze_1_output_0",
"_vector_field_main_blocks_21_attn_Unsqueeze_2_output_0",
"_vector_field_main_blocks_21_attn_Unsqueeze_3_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Concat_output_0[] = {4, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Concat[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Concat_output_0",
    .type=
/* (continuation of the Concat output tensor started in the previous chunk) */
QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Concat_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Concat", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Concat, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Concat, // Input Tensor Names
4, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Concat, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ONNX Slice lowered to QNN StridedSlice: takes the first half of the last
   dimension of the 4x1x192x64 Concat output (ranges [0,32) on axis 3, stride 1
   on all axes) -> 4x1,192x32... i.e. output dims {4, 1, 192, 32}. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Slice_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Slice_1 */
/* "ranges" is a 4x3 static int32 tensor: one (begin, end, stride) triple per axis. */
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_1_ranges[] = {4, 3};
int32_t _vector_field_main_blocks_21_attn_Slice_1_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 0, 32, 1};
Qnn_Param_t params__vector_field_main_blocks_21_attn_Slice_1[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Slice_1_ranges",
    .type= QNN_TENSOR_TYPE_STATIC,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_INT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 2,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_1_ranges,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Slice_1_ranges, .dataSize=48}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}},
/* All masks zero: begin/end taken literally, no new axes, no axes shrunk. */
{.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Slice_1[] = {
"_vector_field_main_blocks_21_attn_Concat_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_1_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Slice_1[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Slice_1_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_1_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Slice_1", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Slice_1, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Slice_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Slice_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* StridedSlice #2: same as Slice_1 but takes the second half of the last
   dimension (ranges [32,64) on axis 3) of the Concat output. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Slice_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Slice_2 */
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_2_ranges[] = {4, 3};
int32_t _vector_field_main_blocks_21_attn_Slice_2_ranges[] = {0, 4, 1, 0, 1, 1, 0, 192, 1, 32, 64, 1};
Qnn_Param_t params__vector_field_main_blocks_21_attn_Slice_2[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Slice_2_ranges",
    .type= QNN_TENSOR_TYPE_STATIC,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_INT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 2,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_2_ranges,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Slice_2_ranges, .dataSize=48}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Slice_2[] = {
"_vector_field_main_blocks_21_attn_Concat_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Slice_2_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Slice_2[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Slice_2_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0745217725634575f, .offset= -115}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Slice_2_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Slice_2", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Slice_2, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Slice_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Slice_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary multiply (node name says Mul; operation code 13);
   function continues into the next chunk. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_3 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
/* (continuation of the Mul_3 "operation" scalar param started in the previous chunk;
   the rotary-embedding arithmetic below multiplies the two Slice halves by the
   shared blocks_3 Cos/Sin tensors.) */
.name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Mul_3[] = {
"_vector_field_main_blocks_21_attn_Slice_1_output_0",
"_vector_field_main_blocks_3_attn_Cos_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_3_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_3[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Mul_3_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0762708261609077f, .offset= -128}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_3_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Mul #4: Slice_2 * Sin (same ElementWiseBinary multiply, op code 13). */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_4 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_4[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value =
13}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Mul_4[] = {
"_vector_field_main_blocks_21_attn_Slice_2_output_0",
"_vector_field_main_blocks_3_attn_Sin_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_4_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_4[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Mul_4_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0410989448428154f, .offset= -101}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_4_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_4", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_4, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_4, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_4, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Mul #5: Slice_1 * Sin. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_5(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_5 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_5[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Mul_5[] = {
"_vector_field_main_blocks_21_attn_Slice_1_output_0",
"_vector_field_main_blocks_3_attn_Sin_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_5_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_5[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Mul_5_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0775766596198082f, .offset= -129}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_5_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_5", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_5, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_5, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_5, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Mul #6: Slice_2 * Cos. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_6(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Mul_6 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_6[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Mul_6[] = {
"_vector_field_main_blocks_21_attn_Slice_2_output_0",
"_vector_field_main_blocks_3_attn_Cos_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Mul_6_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_6[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Mul_6_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0437849573791027f, .offset= -107}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Mul_6_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Mul_6", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Mul_6, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Mul_6, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Mul_6, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary subtract (node name says Sub; operation code 18):
   Mul_3 - Mul_4; the function continues into the next chunk. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Sub(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Sub */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Sub[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 18}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Sub[] = {
"_vector_field_main_blocks_21_attn_Mul_3_output_0",
"_vector_field_main_blocks_21_attn_Mul_4_output_0"
};
uint32_t
/* (continuation of addNode__vector_field_main_blocks_21_attn_Sub from the previous chunk) */
dimensions__vector_field_main_blocks_21_attn_Sub_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Sub[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Sub_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0872200652956963f, .offset= -143}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Sub_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Sub", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Sub, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Sub, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Sub, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary add (operation code 0): Mul_5 + Mul_6. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Add_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Add_1 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Add_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Add_1[] = {
"_vector_field_main_blocks_21_attn_Mul_5_output_0",
"_vector_field_main_blocks_21_attn_Mul_6_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Add_1_output_0[] = {4, 1, 192, 32};
Qnn_Tensor_t
outputs__vector_field_main_blocks_21_attn_Add_1[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Add_1_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0785646736621857f, .offset= -130}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Add_1_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Add_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Add_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Add_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Add_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Concat along axis 3: rejoins the Sub and Add_1 halves (2 x 4x1x192x32)
   into a 4x1x192x64 tensor. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Concat_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Concat_3 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Concat_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Concat_3[] = {
"_vector_field_main_blocks_21_attn_Sub_output_0",
"_vector_field_main_blocks_21_attn_Add_1_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Concat_3_output_0[] = {4, 1, 192, 64};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Concat_3[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Concat_3_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0874237343668938f, .offset= -143}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Concat_3_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Concat_3", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Concat_3, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Concat_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Concat_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* MatMul (no transposes): Concat_3 (4x1x192x64) x Transpose output -> 4x1x192x128;
   the function continues into the next chunk. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_MatMul */
Qnn_Param_t params__vector_field_main_blocks_21_attn_MatMul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_MatMul[] = {
"_vector_field_main_blocks_21_attn_Concat_3_output_0",
"_vector_field_main_blocks_21_attn_Transpose_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_MatMul_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t
/* (continuation of addNode__vector_field_main_blocks_21_attn_MatMul from the previous chunk) */
outputs__vector_field_main_blocks_21_attn_MatMul[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_MatMul_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 4.1403288841247559f, .offset= -131}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_MatMul_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_MatMul", // Node Name
"qti.aisw", // Package Name
"MatMul", // Qnn Node Type
params__vector_field_main_blocks_21_attn_MatMul, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_21_attn_MatMul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_MatMul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary divide (operation code 2): scales the MatMul scores by the
   shared blocks_3 Constant_39 tensor. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Div_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Div_4 */
Qnn_Param_t params__vector_field_main_blocks_21_attn_Div_4[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
};
const char* inputs__vector_field_main_blocks_21_attn_Div_4[] = {
"_vector_field_main_blocks_21_attn_MatMul_output_0",
"_vector_field_main_blocks_3_attn_Constant_39_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Div_4_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Div_4[] = {
(Qnn_Tensor_t) {
  .version=
QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Div_4_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2587705552577972f, .offset= -131}}},
    .rank= 4,
    .dimensions=dimensions__vector_field_main_blocks_21_attn_Div_4_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_21_attn_Div_4", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_21_attn_Div_4, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_21_attn_Div_4, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_21_attn_Div_4, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseSelect (ONNX Where): select(Cast_2, Constant_42, Div_4) — applies
   the attention mask to the scaled scores; the function continues into the
   next chunk. */
static ModelError_t addNode__vector_field_main_blocks_21_attn_Where(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_21_attn_Where */
const char* inputs__vector_field_main_blocks_21_attn_Where[] = {
"_vector_field_main_blocks_21_attn_Cast_2_output_0",
"_vector_field_main_blocks_3_attn_Constant_42_output_0",
"_vector_field_main_blocks_21_attn_Div_4_output_0"
};
uint32_t dimensions__vector_field_main_blocks_21_attn_Where_output_0[] = {4, 1, 192, 128};
Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Where[] = {
(Qnn_Tensor_t) {
  .version= QNN_TENSOR_VERSION_2,
  {.v2= {
    .id=0,
    .name= "_vector_field_main_blocks_21_attn_Where_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE,
    .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
/* NOTE(review): the Where output is 8-bit with scale ~1.334e36 / offset -255 — the min-max
   calibrated range was presumably dominated by a huge negative mask-fill constant (the
   usual -inf stand-in), so all real logit values collapse into a couple of quant bins
   before Softmax. Verify the encodings / consider a masked-softmax path — TODO confirm.
   Then the Softmax node itself: axis=3 (last dim), beta=1.0. */
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 1334440575053054352202761503860850688.0000000000000000f, .offset= -255}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Softmax */ Qnn_Param_t params__vector_field_main_blocks_21_attn_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__vector_field_main_blocks_21_attn_Softmax[] = { "_vector_field_main_blocks_21_attn_Where_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Softmax_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
/* Softmax output uses scale 1/256, offset 0 — the natural u8 encoding for a [0,1)
   probability tensor. Then Where_1 (ElementWiseSelect) re-masks the attention weights:
   condition from blocks_23_attention_Cast, "true" branch the shared block-3 Constant_44,
   "false" branch the softmax probabilities. */
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__vector_field_main_blocks_21_attn_Softmax, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_21_attn_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_Where_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Where_1 */ const char* inputs__vector_field_main_blocks_21_attn_Where_1[] = { "_vector_field_main_blocks_23_attention_Cast_output_0", "_vector_field_main_blocks_3_attn_Constant_44_output_0", "_vector_field_main_blocks_21_attn_Softmax_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Where_1_output_0[] = {4, 1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Where_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Where_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039215688593686f, .offset= 0}}}, .rank= 4, 
/* Where_1 registration, then MatMul_1 — the attention-weights x V product
   ({4,1,192,128} x value tensor from attn_Concat_2 -> u8 {4,1,192,64});
   both transpose_in0/transpose_in1 flags are false. */
.dimensions=dimensions__vector_field_main_blocks_21_attn_Where_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Where_1", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Where_1, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Where_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_MatMul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_MatMul_1 */ Qnn_Param_t params__vector_field_main_blocks_21_attn_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_21_attn_MatMul_1[] = { "_vector_field_main_blocks_21_attn_Where_1_output_0", "_vector_field_main_blocks_21_attn_Concat_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_MatMul_1_output_0[] = {4, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
/* MatMul_1 registration, then Split_3 begins: a static UINT_32 "split_index" tensor
   {1,2,3} (dataSize=12 bytes = 3 x uint32) that will cut the rank-4 tensor into four
   equal pieces — the per-head outputs of the attention product. */
0.0756306499242783f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_MatMul_1", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__vector_field_main_blocks_21_attn_MatMul_1, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_21_attn_MatMul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_MatMul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_Split_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Split_3 */ uint32_t dimensions__vector_field_main_blocks_21_attn_Split_3_split_index[] = {3}; uint32_t _vector_field_main_blocks_21_attn_Split_3_split_index[] = {1, 2, 3}; Qnn_Param_t params__vector_field_main_blocks_21_attn_Split_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_attn_Split_3_split_index, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { 
/* Split_3 continues: axis=0 (the head axis), input is MatMul_1's {4,1,192,64} result,
   producing four {1,1,192,64} outputs. All four reuse the input's quant encoding
   (scale 0.07563, offset -125) — a split never changes value range. Outputs 0 and 1
   are declared here; 2 and 3 follow on the next line. */
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_21_attn_Split_3[] = { "_vector_field_main_blocks_21_attn_MatMul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_3_output_0[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_3_output_1[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_3_output_2[] = {1, 1, 192, 64}; uint32_t dimensions__vector_field_main_blocks_21_attn_Split_3_output_3[] = {1, 1, 192, 64}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Split_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0756306499242783f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0756306499242783f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_3_output_1, 
/* Split_3 outputs 2 and 3 (same {1,1,192,64} shape and encoding as 0/1), then the
   addNode call registering the 4-output "Split" op. */
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_3_output_2", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0756306499242783f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_3_output_2, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Split_3_output_3", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0756306499242783f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Split_3_output_3, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Split_3", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_21_attn_Split_3, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_21_attn_Split_3, // 
Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Split_3, // Output Tensors 4// Num Output Tensors ), err); return err; } /* Concat_5 re-joins the four {1,1,192,64} head slices along axis 3 into a single {1,1,192,256} tensor (the quant encoding is shared by all inputs, so concat preserves it). */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Concat_5(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Concat_5 */ Qnn_Param_t params__vector_field_main_blocks_21_attn_Concat_5[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_21_attn_Concat_5[] = { "_vector_field_main_blocks_21_attn_Split_3_output_0", "_vector_field_main_blocks_21_attn_Split_3_output_1", "_vector_field_main_blocks_21_attn_Split_3_output_2", "_vector_field_main_blocks_21_attn_Split_3_output_3" }; uint32_t dimensions__vector_field_main_blocks_21_attn_Concat_5_output_0[] = {1, 1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Concat_5[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_Concat_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0756306499242783f, .offset= -125}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_21_attn_Concat_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Concat_5", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__vector_field_main_blocks_21_attn_Concat_5, // Node Params 1, // Num Node Params 
inputs__vector_field_main_blocks_21_attn_Concat_5, // Input Tensor Names 4, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Concat_5, // Output Tensors 1// Num Output Tensors ), err); return err; } /* The ONNX Squeeze is lowered to a Reshape: {1,1,192,256} -> {192,256}, pre-flattening for the FullyConnected output projection below (hence the "_pre_reshape" output name). */ static ModelError_t addNode__vector_field_main_blocks_21_attn_Squeeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_Squeeze */ const char* inputs__vector_field_main_blocks_21_attn_Squeeze[] = { "_vector_field_main_blocks_21_attn_Concat_5_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_pre_reshape[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Squeeze[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0756306499242783f, .offset= -125}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Static u8 weight {512,256} for the out_fc FullyConnected; bytes come from the companion binary blob via BINVARSTART/BINLEN. */ static ModelError_t addTensor_onnx__MatMul_3245(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; 
/* Weight tensor onnx__MatMul_3245 ({512,256}, u8 scale/offset) and the matching
   out_fc_linear bias tensor ({512}) — both STATIC tensors whose data is resolved from
   the external binary segment at link time (BINVARSTART/BINLEN). */
uint32_t dimensions_onnx__MatMul_3245[] = {512, 256}; VALIDATE(model.addTensor("onnx__MatMul_3245", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3245", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0098755918443203f, .offset= -118}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3245, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3245), .dataSize=BINLEN(onnx__MatMul_3245)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0019575608894229f, .offset= -189}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
/* Output projection: the ONNX MatMul+Add pair is fused into one FullyConnected node
   (3 inputs: activation {192,256}, weight onnx__MatMul_3245, bias) producing the
   2-D "_Add_output_0_fc" tensor {192,512}. */
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_out_fc_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_out_fc_linear_MatMul */ const char* inputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul[] = { "_vector_field_main_blocks_21_attn_out_fc_linear_MatMul_pre_reshape", "onnx__MatMul_3245", "tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0_fc[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2865160703659058f, .offset= -119}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_out_fc_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
/* post_reshape restores the batch dim after the FullyConnected: {192,512} -> {1,192,512},
   keeping the FC output's quant encoding (reshape is value-preserving). */
addNode__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2865160703659058f, .offset= -119}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_attn_Mul_14(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_21_attn_Mul_14 */ Qnn_Param_t params__vector_field_main_blocks_21_attn_Mul_14[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_21_attn_Mul_14[] = { "_vector_field_main_blocks_21_attn_out_fc_linear_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_21_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_attn_Mul_14[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2865160703659058f, .offset= -119}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; /* operation=13 with the ONNX node name "Mul": multiply by the broadcast graph input "latent_mask" {1,1,192} — presumably zeroing padded latent frames; the Transpose in the output name was folded away by the converter's layout pass. */ VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_attn_Mul_14", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_21_attn_Mul_14, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_21_attn_Mul_14, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_21_attn_Mul_14, // Output Tensors 1// Num Output Tensors ), err); return err; } /* Residual connection: ElementWiseBinary operation=0 matching the ONNX Add, summing the masked attention branch with the block-21 skip tensor. */ static ModelError_t addNode__vector_field_main_blocks_21_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_Add */ Qnn_Param_t params__vector_field_main_blocks_21_Add[] = { 
/* Add node body: sums Transpose_1 (attention branch) with Transpose (skip branch) into
   the {1,192,512} pre-norm tensor, then the static LayerNorm gamma tensor begins. */
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_21_Add[] = { "_vector_field_main_blocks_21_Transpose_1_output_0", "_vector_field_main_blocks_21_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_21_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.2881692051887512f, .offset= -119}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_21_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_21_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_21_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_21_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_21_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_21_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, 
/* Static LayerNorm parameter tensors for block 21's norm: gamma ("norm_weight", {512})
   and beta ("norm_bias", {512}), u8-quantized, data pulled from the binary blob. */
{.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_21_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012620992492884f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_21_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_21_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_21_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_21_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_21_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_21_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_21_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015569323441014f, .offset= -79}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_21_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_21_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_21_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
/* LayerNorm over axis 2 (the 512-channel dim, per the static "axes" tensor = {2}) with
   epsilon 1e-6; inputs are the pre-norm activation plus the gamma/beta tensors above. */
addNode__vector_field_main_blocks_21_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_main_blocks_21_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_main_blocks_21_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_main_blocks_21_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_21_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_21_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_21_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_21_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_21_norm_norm_weight", "tts_ttl_vector_field_main_blocks_21_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_21_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
/* LayerNorm output tensor + node registration ("LayerNorm", 2 params, 3 inputs), then
   Mul_1: another ElementWiseBinary operation=13 (ONNX Mul) applying the graph input
   "latent_mask" to the normalized activation. */
"_vector_field_main_blocks_21_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244074389338493f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_21_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_21_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_21_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_21_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_21_Mul_1 */ Qnn_Param_t params__vector_field_main_blocks_21_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_21_Mul_1[] = { "_vector_field_main_blocks_21_norm_Transpose_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_21_Mul_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_21_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_21_Mul_1_output_0", .type= 
/* Mul_1 registration, then the first node of block 22: convnext_0_Mul re-applies
   "latent_mask" (ElementWiseBinary operation=13, ONNX Mul) to block 21's masked output
   before the depthwise-conv stage. Output keeps the same encoding — multiplying by a
   0/1-style mask cannot widen the range. */
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244074389338493f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_21_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_21_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_21_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_21_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_21_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_Mul */ Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_Mul[] = { "_vector_field_main_blocks_21_Mul_1_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244074389338493f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Pad */ uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_pad_amount, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad[] = { "_vector_field_main_blocks_22_convnext_0_Mul_output_0" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244074389338493f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return 
err;
}

/* Auto-generated: layout Transpose, perm {0,2,1} — [1,196,512] (batch, seq, feat) -> [1,512,196]
 * so features become the channel axis for the conv lowering that follows. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244074389338493f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Auto-generated: Reshape [1,512,196] -> [1,512,1,196], inserting a height-1 axis so the
 * converter can run the (originally 1-D) depthwise conv as DepthWiseConv2d. No params. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244074389338493f, .offset= -175}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Auto-generated: Transpose perm {0,2,3,1} — NCHW [1,512,1,196] -> NHWC [1,1,196,512],
 * the layout DepthWiseConv2d expects. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0244074389338493f, .offset= -175}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Auto-generated: static u8 depthwise-conv weight [1, 5, 1, 512] (kernel width 5, 512 channels),
 * loaded from the serialized blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0049031013622880f, .offset= -132}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Auto-generated: static u8 depthwise-conv bias [512], loaded from the serialized blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005820736405440f, .offset= -106}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Auto-generated: DepthWiseConv2d node. dilation {1,1}, pad_amount {0,0,0,0} (padding was applied
 * earlier by the explicit Pad node), stride {1,1}. Inputs: NHWC activation [1,1,196,512],
 * weight, bias; output [1,1,192,512] (196 - (5-1) = 192) with a fresh u8 scale/offset. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight", "tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015616406453773f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
static ModelError_t
addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){
/* Auto-generated: Transpose perm {0,3,1,2} — NHWC [1,1,192,512] -> NCHW [1,512,1,192],
 * undoing the conv's layout change. Keeps the conv output's u8 scale/offset. */
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015616406453773f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Auto-generated: Reshape [1,512,1,192] -> [1,512,192], dropping the helper height-1 axis
 * to restore the original rank-3 conv output. No params. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate */
const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015616406453773f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Auto-generated: Transpose perm {0,2,1} — [1,512,192] back to the (batch, seq, feat)
 * layout [1,192,512] used by the rest of the block. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015616406453773f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Auto-generated: ElementWiseBinary (operation=13) "_vector_field_main_blocks_22_convnext_0_Mul_1".
 * Masks the depthwise-conv output with "latent_mask". The output is named
 * "..._norm_Transpose_output_0" — presumably the ONNX Transpose was folded away by the
 * converter and its name reused; verify against the original graph if renaming. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_Mul_1 */
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_Mul_1[] = { "_vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc", "latent_mask" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015616406453773f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Auto-generated: static u8 LayerNorm gamma [512] for block 22's convnext norm, from the blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0025693294592202f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Auto-generated: static u8 LayerNorm beta [512] for block 22's convnext norm, from the blob. */
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0022860504686832f, .offset= -112}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

/* Auto-generated: LayerNorm node for block 22 (axes={2}, i.e. normalize over the feature axis);
 * definition continues past this point. */
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements=
0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_main_blocks_22_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight", "tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0353530459105968f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization, // Output Tensors 1// Num 
Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf", 
// Remainder of the channels-first Transpose node: u8 {1,512,192} output with the
// same scale/offset as its input, then the qti.aisw Transpose registration.
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0353530459105968f, .offset= -147}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node (no params): lifts the 3D activation {1,512,192} to 4D
// {1,512,1,192} so the pointwise pwconv1 can run as a Conv2d. Quantization
// params are carried through unchanged.
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d */
const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0353530459105968f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: reorders the 4D tensor to NHWC with perm = {0,2,3,1} ahead of
// the Conv2d (the converter runs convolutions in spatial-last layout).
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
// "perm" tensor param: rank-1 UINT32 tensor {0,2,3,1}; dataSize 16 = 4 * sizeof(uint32_t).
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
// Remainder of the NHWC "perm" param, the {1,1,192,512} u8 output tensor
// (quantization unchanged -- layout-only op), and the Transpose registration.
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0353530459105968f, .offset= -147}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the pwconv1 weight as a static u8 tensor {1,1,512,1024}
// (1x1 kernel, 512 in / 1024 out channels), data backed by the
// BINVARSTART/BINLEN blob emitted alongside this file.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0129949981346726f, .offset= -97}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Registers the pwconv1 bias as a static u8 tensor {1024}, blob-backed;
// initializer continues on the next line of this generated file.
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias[] = {1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias", // Tensor Name
(Qnn_Tensor_t) {
// Remainder of the pwconv1 bias static-tensor registration (u8, scale/offset
// quantized, blob-backed).
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020019339863211f, .offset= -209}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Builds the pwconv1 Conv2d node: a 1x1 convolution over the NHWC activation
// {1,1,192,512} with the weight/bias tensors above, dilation {1,1},
// zero padding, stride {1,1}, group = 1.
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d */
// Param payloads: dilation/stride are rank-1 {2} arrays (8 bytes each);
// pad_amount is rank-2 {2,2} = {0,0,0,0} (16 bytes).
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1,
// Remainder of the "stride" param, the scalar params (group = 1,
// reuse_sparse_indices = false), the three Conv2d inputs (NHWC activation,
// weight, bias), its {1,1,192,1024} u8 output, and the node registration
// (qti.aisw Conv2d, 5 params / 3 inputs / 1 output).
.dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight", "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0322681181132793f, .offset= -201}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: moves the Conv2d result back from NHWC to NCHW with
// perm = {0,3,1,2}; perm is a rank-1 UINT32 tensor (dataSize 16 = 4 * sizeof(uint32_t)).
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char*
// Remainder of the NCHW transpose: input/output declarations ({1,1024,1,192} u8,
// quantization unchanged) and the qti.aisw Transpose registration.
inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0322681181132793f, .offset= -201}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node (no params): drops the dummy spatial axis, producing the 3D
// pwconv1 output {1,1024,192}. Quantization params carried through unchanged.
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate */
const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0322681181132793f, .offset= -201}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseNeuron node applied to the pwconv1 output (the converter's
// lowering of the activation's Mul; the "operation" scalar param selects the
// neuron function -- value 1 here, see QnnOpDef.h for the enumeration).
static ModelError_t addNode__elementwiseneuron_46(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_46 */
Qnn_Param_t params__elementwiseneuron_46[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_46[] = {
"_vector_field_main_blocks_22_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_46[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0072108008898795f, .offset= -24}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_46", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_46, // Node Params 1, // Num Node Params inputs__elementwiseneuron_46, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_46, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_main_blocks_22_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0072108008898795f, .offset= -24}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0072108008898795f, .offset= -24}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0189892388880253f, .offset= -120}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t 
// Remainder of the pwconv2 bias static-tensor registration: {512} u8
// scale/offset-quantized data backed by the BINVARSTART/BINLEN blob.
dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012078949948773f, .offset= -114}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Builds the pwconv2 Conv2d node: 1x1 convolution with dilation {1,1},
// zero padding, stride {1,1}; the node definition continues on the next
// line of this generated file.
static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t)
{ .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight", "tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0290996544063091f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0290996544063091f, .offset= -121}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0290996544063091f, .offset= -121}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t 
dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0290996544063091f, .offset= -121}}}, .rank= 3, 
.dimensions=dimensions__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_22_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_22_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017095159273595f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_22_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_22_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_22_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_Mul_2[] = { "tts_ttl_vector_field_main_blocks_22_convnext_0_gamma", "_vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0116903856396675f, .offset= -121}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names 
outputs__vector_field_main_blocks_22_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_Add */ Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_Add[] = { "_vector_field_main_blocks_22_convnext_0_Mul_output_0", "_vector_field_main_blocks_22_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0333916842937469f, .offset= -164}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_Add, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names 
outputs__vector_field_main_blocks_22_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_22_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_22_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_main_blocks_22_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_22_convnext_0_Mul_3[] = { "_vector_field_main_blocks_22_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_22_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_22_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_22_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0333916842937469f, .offset= -164}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_22_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_22_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_22_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_22_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names 
outputs__vector_field_main_blocks_22_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_Mul */ Qnn_Param_t params__vector_field_main_blocks_23_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_main_blocks_23_Mul[] = { "_vector_field_main_blocks_22_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_main_blocks_23_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0333916842937469f, .offset= -164}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_23_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_23_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_23_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape */ const char* inputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape[] = { "_vector_field_main_blocks_23_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape[] = {192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0333916842937469f, .offset= -164}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_onnx__MatMul_3251(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; 
uint32_t dimensions_onnx__MatMul_3251[] = {256, 512}; VALIDATE(model.addTensor("onnx__MatMul_3251", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3251", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0213520415127277f, .offset= -127}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3251, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3251), .dataSize=BINLEN(onnx__MatMul_3251)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias[] = {256}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0056043379008770f, .offset= -137}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, 
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_attention_W_query_linear_MatMul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_W_query_linear_MatMul */ const char* inputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul[] = { "_vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape", "onnx__MatMul_3251", "tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias" }; uint32_t dimensions__vector_field_main_blocks_23_attention_W_query_linear_Add_output_0_fc[] = {192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_W_query_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302473902702332f, .offset= -126}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_23_attention_W_query_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_attention_W_query_linear_MatMul", // Node Name "qti.aisw", // Package Name "FullyConnected", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul, // Output 
Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape */ const char* inputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape[] = { "_vector_field_main_blocks_23_attention_W_query_linear_Add_output_0_fc" }; uint32_t dimensions__vector_field_main_blocks_23_attention_W_query_linear_Add_output_0[] = {1, 192, 256}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_W_query_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302473902702332f, .offset= -126}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_attention_W_query_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_main_blocks_23_attention_Split(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Split */ uint32_t dimensions__vector_field_main_blocks_23_attention_Split_split_index[] = {1}; uint32_t _vector_field_main_blocks_23_attention_Split_split_index[] = {128}; Qnn_Param_t params__vector_field_main_blocks_23_attention_Split[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_23_attention_Split_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} }; const char* inputs__vector_field_main_blocks_23_attention_Split[] = { "_vector_field_main_blocks_23_attention_W_query_linear_Add_output_0" }; uint32_t dimensions__vector_field_main_blocks_23_attention_Split_output_0[] = {1, 192, 128}; uint32_t dimensions__vector_field_main_blocks_23_attention_Split_output_1[] = {1, 192, 128}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Split[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302473902702332f, .offset= -126}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}, (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302473902702332f, .offset= -126}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_attention_Split", // Node Name "qti.aisw", // Package Name "Split", // Qnn Node Type params__vector_field_main_blocks_23_attention_Split, // Node Params 2, // Num Node Params inputs__vector_field_main_blocks_23_attention_Split, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_Split, // Output Tensors 2// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_attention_Unsqueeze(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Unsqueeze */ const char* 
inputs__vector_field_main_blocks_23_attention_Unsqueeze[] = {
    "_vector_field_main_blocks_23_attention_Split_output_0"
  };
  // {1, 192, 128} -> {1, 1, 192, 128}; quant encoding is carried over unchanged
  // (Reshape does not requantize).
  uint32_t dimensions__vector_field_main_blocks_23_attention_Unsqueeze_output_0[] = {1, 1, 192, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Unsqueeze[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302473902702332f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Unsqueeze_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Unsqueeze", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Unsqueeze, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Unsqueeze, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Unsqueeze_1: same rank-3 -> rank-4 Reshape as above, applied to the second
// Split output.
static ModelError_t addNode__vector_field_main_blocks_23_attention_Unsqueeze_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Unsqueeze_1 */
  const char* inputs__vector_field_main_blocks_23_attention_Unsqueeze_1[] = {
    "_vector_field_main_blocks_23_attention_Split_output_1"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Unsqueeze_1_output_0[] = {1, 1, 192, 128};
  Qnn_Tensor_t
outputs__vector_field_main_blocks_23_attention_Unsqueeze_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302473902702332f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Unsqueeze_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Unsqueeze_1", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Unsqueeze_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Unsqueeze_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Concat: stacks the two unsqueezed {1, 1, 192, 128} tensors along axis 0
// into a {2, 1, 192, 128} batch (presumably a two-head batching trick —
// TODO confirm against the exporting model).
static ModelError_t addNode__vector_field_main_blocks_23_attention_Concat(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Concat */
  Qnn_Param_t params__vector_field_main_blocks_23_attention_Concat[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_Concat[] = {
    "_vector_field_main_blocks_23_attention_Unsqueeze_output_0",
    "_vector_field_main_blocks_23_attention_Unsqueeze_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Concat_output_0[] = {2, 1, 192, 128};
  Qnn_Tensor_t
outputs__vector_field_main_blocks_23_attention_Concat[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0302473902702332f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Concat", // Node Name
                         "qti.aisw", // Package Name
                         "Concat", // Qnn Node Type
                         params__vector_field_main_blocks_23_attention_Concat, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Concat, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Concat, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Static key-side tensor: a pre-quantized constant {2, 1, 128, 50} (uint8),
// materialized from the compiled binary blob via BINVARSTART/BINLEN. The
// "tanh_Tanh_output_0" name suggests it was constant-folded from a Tanh over
// fixed style/key weights — TODO confirm against the source ONNX graph.
static ModelError_t addTensor__vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions__vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw[] = {2, 1, 128, 50};
  VALIDATE(model.addTensor("_vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0078409770503640f, .offset= -128}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw), .dataSize=BINLEN(_vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// MatMul (attention scores): {2, 1, 192, 128} x {2, 1, 128, 50} ->
// {2, 1, 192, 50}, no transposes on either operand.
static ModelError_t addNode__vector_field_main_blocks_23_attention_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_MatMul */
  Qnn_Param_t params__vector_field_main_blocks_23_attention_MatMul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_MatMul[] = {
    "_vector_field_main_blocks_23_attention_Concat_output_0",
    "_vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_MatMul_output_0[] = {2, 1, 192, 50};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.7935206294059753f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "MatMul", // Qnn Node Type
                         params__vector_field_main_blocks_23_attention_MatMul, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_MatMul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_MatMul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Div: ElementWiseBinary with operation=2 (the converter's enum value for
// divide — presumably the usual 1/sqrt(d) attention-score scaling; the
// divisor is a shared constant tensor defined elsewhere in this file).
// Note the output scale is exactly the MatMul scale / 16.
static ModelError_t addNode__vector_field_main_blocks_23_attention_Div(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Div */
  Qnn_Param_t params__vector_field_main_blocks_23_attention_Div[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_Div[] = {
    "_vector_field_main_blocks_23_attention_MatMul_output_0",
    "_vector_field_main_blocks_3_attn_Constant_39_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Div_output_0[] = {2, 1, 192, 50};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Div[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0495950393378735f, .offset= -116}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Div", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_main_blocks_23_attention_Div, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Div, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Div, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Softmax over the last axis (axis=3, beta=1.0). The output encoding
// scale 1/256 with offset 0 covers the softmax range [0, 1).
static ModelError_t addNode__vector_field_main_blocks_23_attention_Softmax(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Softmax */
  Qnn_Param_t params__vector_field_main_blocks_23_attention_Softmax[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_Softmax[] = {
    "_vector_field_main_blocks_23_attention_Div_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Softmax_output_0[] = {2, 1, 192, 50};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Softmax[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039062500000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Softmax_output_0, .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Softmax", // Node Name
                         "qti.aisw", // Package Name
                         "Softmax", // Qnn Node Type
                         params__vector_field_main_blocks_23_attention_Softmax, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Softmax, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Softmax, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Where -> ElementWiseSelect(condition, then, else): where the Cast-produced
// condition holds, pick the shared constant (Constant_44 — presumably the
// masked-out fill value, TODO confirm), otherwise keep the softmax
// probabilities. Both value inputs are defined elsewhere in this file.
static ModelError_t addNode__vector_field_main_blocks_23_attention_Where(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Where */
  const char* inputs__vector_field_main_blocks_23_attention_Where[] = {
    "_vector_field_main_blocks_23_attention_Cast_output_0",
    "_vector_field_main_blocks_3_attn_Constant_44_output_0",
    "_vector_field_main_blocks_23_attention_Softmax_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Where_output_0[] = {2, 1, 192, 50};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Where[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033727039117366f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Where", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseSelect", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Where, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Where, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// MatMul_1 (attention output): masked probabilities {2, 1, 192, 50} x
// value stack {2, 1, 50, 128} (Concat_2, defined elsewhere in this file)
// -> {2, 1, 192, 128}, no transposes.
static ModelError_t addNode__vector_field_main_blocks_23_attention_MatMul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_MatMul_1 */
  Qnn_Param_t params__vector_field_main_blocks_23_attention_MatMul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_MatMul_1[] = {
    "_vector_field_main_blocks_23_attention_Where_output_0",
    "_vector_field_main_blocks_23_attention_Concat_2_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_MatMul_1_output_0[] = {2, 1, 192, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_MatMul_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016814435366541f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr,
.dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_MatMul_1", // Node Name
                         "qti.aisw", // Package Name
                         "MatMul", // Qnn Node Type
                         params__vector_field_main_blocks_23_attention_MatMul_1, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_MatMul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_MatMul_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Split_3: undoes the axis-0 stacking — splits {2, 1, 192, 128} at index 1
// along axis 0 into two {1, 1, 192, 128} tensors (both keep the MatMul_1
// quant encoding).
static ModelError_t addNode__vector_field_main_blocks_23_attention_Split_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Split_3 */
  uint32_t dimensions__vector_field_main_blocks_23_attention_Split_3_split_index[] = {1};
  uint32_t _vector_field_main_blocks_23_attention_Split_3_split_index[] = {1};
  Qnn_Param_t params__vector_field_main_blocks_23_attention_Split_3[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="split_index", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_3_split_index", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_3_split_index, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_23_attention_Split_3_split_index, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_Split_3[] = {
    "_vector_field_main_blocks_23_attention_MatMul_1_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Split_3_output_0[] = {1, 1, 192, 128};
  uint32_t dimensions__vector_field_main_blocks_23_attention_Split_3_output_1[] = {1, 1, 192, 128};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Split_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016814435366541f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}},
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Split_3_output_1", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016814435366541f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Split_3_output_1, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Split_3", // Node Name
                         "qti.aisw", // Package Name
                         "Split", // Qnn Node Type
                         params__vector_field_main_blocks_23_attention_Split_3, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Split_3, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Split_3, // Output Tensors
                         2// Num Output Tensors
  ), err);
  return err;
}

// Concat_3: re-joins the two {1, 1, 192, 128} halves along the feature axis
// (axis=3) into {1, 1, 192, 256} — head merge before the output projection.
static ModelError_t addNode__vector_field_main_blocks_23_attention_Concat_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Concat_3 */
  Qnn_Param_t params__vector_field_main_blocks_23_attention_Concat_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_Concat_3[] = {
    "_vector_field_main_blocks_23_attention_Split_3_output_0",
    "_vector_field_main_blocks_23_attention_Split_3_output_1"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_Concat_3_output_0[] = {1, 1, 192, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Concat_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_Concat_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016814435366541f, .offset= -145}}}, .rank= 4, .dimensions=dimensions__vector_field_main_blocks_23_attention_Concat_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Concat_3", // Node Name
                         "qti.aisw", // Package Name
                         "Concat", // Qnn Node Type
                         params__vector_field_main_blocks_23_attention_Concat_3, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Concat_3, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Concat_3, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Squeeze: lowered to Reshape, flattening {1, 1, 192, 256} to the rank-2
// {192, 256} layout the following FullyConnected expects.
static ModelError_t addNode__vector_field_main_blocks_23_attention_Squeeze(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Squeeze */
  const char* inputs__vector_field_main_blocks_23_attention_Squeeze[] = {
    "_vector_field_main_blocks_23_attention_Concat_3_output_0"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_pre_reshape[] = {192, 256};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Squeeze[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_out_fc_linear_MatMul_pre_reshape", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0016814435366541f, .offset= -145}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_pre_reshape, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Squeeze", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_Squeeze, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_Squeeze, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Output-projection weights: static uint8 {512, 256} matrix streamed from the
// compiled binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_onnx__MatMul_3254(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_onnx__MatMul_3254[] = {512, 256};
  VALIDATE(model.addTensor("onnx__MatMul_3254", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "onnx__MatMul_3254", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060418960638344f, .offset= -131}}}, .rank= 2, .dimensions=dimensions_onnx__MatMul_3254, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(onnx__MatMul_3254), .dataSize=BINLEN(onnx__MatMul_3254)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Output-projection bias: static uint8 {512} vector from the binary blob
// (bias_bitwidth=8 per the converter command line in the file header).
static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012558277230710f, .offset= -115}}}, .rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Output projection: ONNX MatMul+Add lowered to a single QNN FullyConnected
// (input {192, 256}, weights {512, 256}, bias {512}) -> {192, 512}.
static ModelError_t addNode__vector_field_main_blocks_23_attention_out_fc_linear_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_out_fc_linear_MatMul */
  const char* inputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul[] = {
    "_vector_field_main_blocks_23_attention_out_fc_linear_MatMul_pre_reshape",
    "onnx__MatMul_3254",
    "tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0_fc[] = {192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0_fc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024165608920157f, .offset= -114}}}, .rank= 2, .dimensions=dimensions__vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0_fc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_23_attention_out_fc_linear_MatMul", // Node Name
                         "qti.aisw", // Package Name
                         "FullyConnected", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Restores the batch dimension dropped for FullyConnected:
// {192, 512} -> {1, 192, 512}; quant encoding unchanged.
static ModelError_t addNode__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape */
  const char* inputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape[] = {
    "_vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0_fc"
  };
  uint32_t dimensions__vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024165608920157f, .offset= -114}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape", // Node Name
                         "qti.aisw", // Package Name
"Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Mul: ElementWiseBinary with operation=13 (the converter's enum value for
// multiply — presumed; see QnnOpDef.h) against the graph input "latent_mask"
// ({1, 1, 192}, broadcast), zeroing padded latent positions.
static ModelError_t addNode__vector_field_main_blocks_23_attention_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_main_blocks_23_attention_Mul */
  Qnn_Param_t params__vector_field_main_blocks_23_attention_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_main_blocks_23_attention_Mul[] = {
    "_vector_field_main_blocks_23_attention_out_fc_linear_Add_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_main_blocks_23_Transpose_1_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_main_blocks_23_attention_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024165608920157f, .offset= -114}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_main_blocks_23_attention_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_23_attention_Mul, // Node Params 1, // Num Node Params inputs__vector_field_main_blocks_23_attention_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_23_attention_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_main_blocks_23_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_main_blocks_23_Add */ Qnn_Param_t params__vector_field_main_blocks_23_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_main_blocks_23_Add[] = { "_vector_field_main_blocks_23_Transpose_1_output_0", "_vector_field_main_blocks_23_Transpose_output_0" }; uint32_t dimensions__vector_field_main_blocks_23_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_main_blocks_23_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_main_blocks_23_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0342322774231434f, .offset= -164}}}, .rank= 3, .dimensions=dimensions__vector_field_main_blocks_23_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_main_blocks_23_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_main_blocks_23_Add, // Node Params 1, // Num Node Params 
inputs__vector_field_main_blocks_23_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_main_blocks_23_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_23_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_23_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_23_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_23_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028036891017109f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_main_blocks_23_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_23_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_main_blocks_23_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_main_blocks_23_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_main_blocks_23_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_main_blocks_23_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_main_blocks_23_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0008589907665737f, .offset= -116}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_main_blocks_23_norm_norm_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_main_blocks_23_norm_norm_bias),
.dataSize=BINLEN(tts_ttl_vector_field_main_blocks_23_norm_norm_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}
/* LayerNorm over the last axis (axes={2}, epsilon=1e-6) of the block-23 residual sum,
 * using the static gamma/beta tensors added above this node in the generated file.
 * Output shape {1, 192, 512}, quantized uint8. */
static ModelError_t addNode__vector_field_main_blocks_23_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_23_norm_norm_LayerNormalization */
uint32_t dimensions__vector_field_main_blocks_23_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _vector_field_main_blocks_23_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__vector_field_main_blocks_23_norm_norm_LayerNormalization[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="axes",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_23_norm_norm_LayerNormalization_axes",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_main_blocks_23_norm_norm_LayerNormalization_axes,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_main_blocks_23_norm_norm_LayerNormalization_axes,
.dataSize=4}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="epsilon",
{.scalarParam= (Qnn_Scalar_t)
{QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
};
const char* inputs__vector_field_main_blocks_23_norm_norm_LayerNormalization[] = {
"_vector_field_main_blocks_23_norm_Transpose_output_0",
"tts_ttl_vector_field_main_blocks_23_norm_norm_weight",
"tts_ttl_vector_field_main_blocks_23_norm_norm_bias"
};
uint32_t dimensions__vector_field_main_blocks_23_norm_Transpose_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_23_norm_norm_LayerNormalization[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_23_norm_Transpose_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0138677768409252f, .offset= -118}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_23_norm_Transpose_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_23_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__vector_field_main_blocks_23_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__vector_field_main_blocks_23_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_main_blocks_23_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary (operation=13, multiply): re-applies "latent_mask" to the LayerNorm
 * output; same quantization encoding as its first input (scale 0.01386..., offset -118). */
static ModelError_t addNode__vector_field_main_blocks_23_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_main_blocks_23_Mul_1 */
Qnn_Param_t
params__vector_field_main_blocks_23_Mul_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_main_blocks_23_Mul_1[] = {
"_vector_field_main_blocks_23_norm_Transpose_1_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_main_blocks_23_Mul_1_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_main_blocks_23_Mul_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_main_blocks_23_Mul_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0138677768409252f, .offset= -118}}},
.rank= 3,
.dimensions=dimensions__vector_field_main_blocks_23_Mul_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_main_blocks_23_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_main_blocks_23_Mul_1, // Node Params
1, // Num Node Params
inputs__vector_field_main_blocks_23_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_main_blocks_23_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary (operation=13, multiply): masks the last-ConvNeXt-block input with
 * "latent_mask" before the depthwise-conv stage; output shape {1, 192, 512}. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_Mul */
Qnn_Param_t params__vector_field_last_convnext_convnext_0_Mul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32,
{.uint32Value = 13}}}}
};
const char* inputs__vector_field_last_convnext_convnext_0_Mul[] = {
"_vector_field_main_blocks_23_Mul_1_output_0",
"latent_mask"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_Mul_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_Mul[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_Mul_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0138677768409252f, .offset= -118}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_Mul_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_last_convnext_convnext_0_Mul, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Pad node (scheme=3) preparing the masked tensor for a kernel-5 depthwise conv:
 * pads axis 1 by 2 on each side ({0,0, 2,2, 0,0} over the 3 dims), so the sequence
 * length grows 192 -> 196 and the conv below yields 192 again with VALID padding. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Pad */
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _vector_field_last_convnext_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t
params__vector_field_last_convnext_convnext_0_dwconv_Pad[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Pad_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Pad_pad_amount,
.dataSize=24}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="scheme",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__vector_field_last_convnext_convnext_0_dwconv_Pad[] = {
"_vector_field_last_convnext_convnext_0_Mul_output_0"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0[] = {1, 196, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Pad[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Pad_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0138677768409252f, .offset= -118}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__vector_field_last_convnext_convnext_0_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose with perm {0, 2, 1}: converts the padded tensor from {1, 196, 512}
 * (batch, seq, features) to NCF layout {1, 512, 196} for the conv reshape below. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf */
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm,
.dataSize=12}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Pad_output_0"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 512, 196};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0138677768409252f, .offset= -118}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape {1, 512, 196} -> {1, 512, 1, 196}: inserts a height-1 axis so the 1-D
 * depthwise convolution can run as a rank-4 DepthWiseConv2d. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d */
const char*
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0138677768409252f, .offset= -118}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose with perm {0, 2, 3, 1}: NCHW {1, 512, 1, 196} -> NHWC {1, 1, 196, 512},
 * the spatial-last layout the DepthWiseConv2d node below consumes. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t
_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0138677768409252f, .offset= -118}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc,
.memType=
QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static depthwise-conv filter: shape {1, 5, 1, 512} (kernel height 1, width 5,
 * depth multiplier 1, 512 channels); quantized uint8, payload from the model binary. */
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight[] = {1, 5, 1, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0085925003513694f, .offset= -129}}},
.rank= 4,
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight),
.dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}
/* Static depthwise-conv bias: 512-element quantized uint8 vector from the model binary. */
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0009525595814921f, .offset= -126}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias),
.dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}
/* DepthWiseConv2d over the NHWC tensor {1, 1, 196, 512}: kernel 1x5, stride 1x1,
 * dilation 1x1, zero pad_amount (padding already applied by the Pad node); with
 * width 196 and VALID padding the output is {1, 1, 192, 512}. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Conv_2d */
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_2d_dilation[] = {2};
uint32_t _vector_field_last_convnext_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_last_convnext_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_2d_stride[] = {2};
uint32_t
_vector_field_last_convnext_convnext_0_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_0_dwconv_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_2d_dilation",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_2d_dilation,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Conv_2d_dilation,
.dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_2d_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_2d_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Conv_2d_pad_amount,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="stride",
{.tensorParam=(Qnn_Tensor_t) {
.version=
QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_2d_stride",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_2d_stride,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Conv_2d_stride,
.dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_2d[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight",
"tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_2d[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0036386582069099f, .offset= -86}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__vector_field_last_convnext_convnext_0_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose with perm {0, 3, 1, 2}: NHWC conv output {1, 1, 192, 512} back to
 * NCHW {1, 512, 1, 192} ahead of the squeeze-reshape that follows. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char*
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0036386582069099f, .offset= -86}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape {1, 512, 1, 192} -> {1, 512, 192}: drops the dummy height axis added for
 * the 2-D depthwise convolution. */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate */
const char*
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0[] = {1, 512, 192};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0036386582069099f, .offset= -86}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose with perm {0, 2, 1}: NCF {1, 512, 192} back to feature-last
 * {1, 192, 512} so downstream elementwise ops can broadcast "latent_mask". */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc */
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm[]
= {0, 2, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm,
.dataSize=12}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Conv_output_0"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0036386582069099f, .offset= -86}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary (operation=13, multiply): masks the depthwise-conv output with
 * "latent_mask". (Definition continues beyond this chunk in the generated file.) */
static ModelError_t addNode__vector_field_last_convnext_convnext_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_Mul_1 */
Qnn_Param_t params__vector_field_last_convnext_convnext_0_Mul_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__vector_field_last_convnext_convnext_0_Mul_1[] = {
"_vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc",
"latent_mask"
};
uint32_t dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_output_0[] = {1, 192, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_Mul_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_vector_field_last_convnext_convnext_0_norm_Transpose_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0036386582069099f, .offset= -86}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_0_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034617802593857f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias(QnnModel& model){ 
/* Generated QNN graph section: registers the static LayerNorm bias tensor ({512}, quantized
 * uint8, data from the BINVARSTART/BINLEN blob); adds the LayerNorm node (tensor param
 * axes={2} — normalize over the trailing 512-feature axis of the {1,192,512} input — and
 * scalar epsilon=1e-6) taking input activation + weight + bias; then adds a Transpose node
 * (perm {0,2,1}) returning the normalized tensor to feature-first layout {1,512,192}. */
ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015813673380762f, .offset= -134}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization[] = { "_vector_field_last_convnext_convnext_0_norm_Transpose_output_0", "tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight", "tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186671223491430f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t
Version "_vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char*
inputs__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = { "_vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186671223491430f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d */ const char*
/* Generated QNN graph section: lowers pwconv1 (ONNX 1-D pointwise conv) to a 2-D Conv2d —
 * Reshape rank-3 {1,512,192} to rank-4 {1,512,1,192}, then Transpose to NHWC (perm {0,2,3,1},
 * giving {1,1,192,512}); registers the static pwconv1 weight tensor ({1,1,512,1024}, i.e.
 * 1x1 kernel, 512 in / 1024 out channels) and bias tensor ({1024}), both uint8 scale/offset
 * quantized from BINVARSTART/BINLEN blobs; then begins the Conv2d node's tensor params
 * (dilation {1,1}, pad_amount all zero, stride — completed in the following section). */
inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186671223491430f, .offset= -162}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t
_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0186671223491430f, .offset= -162}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0093388073146343f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032245160546154f, .offset= -227}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_stride[] = {2}; uint32_t
/* Generated QNN graph section: finishes the Conv2d node params (dilation {1,1}, zero
 * pad_amount, stride {1,1}, scalar group=1, reuse_sparse_indices=false) and adds the
 * pwconv1 Conv2d node (NHWC input {1,1,192,512} + weight + bias -> {1,1,192,1024});
 * then adds a Transpose node (perm {0,3,1,2}) returning the conv output to NCHW
 * layout {1,1024,1,192}. All activations are uint8 scale/offset quantized. */
_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d[] = { "_vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight", "tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0404853038489819f, .offset= -175}}}, .rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0404853038489819f, .offset= -175}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
/* Generated QNN graph section: Reshape collapses the NCHW conv output {1,1024,1,192} back to
 * rank 3 {1,1024,192}; ElementWiseNeuron node (scalar operation=1) applies the ConvNeXt
 * activation — the original ONNX tensor name "..._act_Mul_1_output_0" suggests a decomposed
 * GELU, but confirm code 1 against the QNN ElementWiseNeuron operation enum; then begins the
 * analogous pwconv2 lowering (Reshape to rank-4 {1,1024,1,192} and the start of its NHWC
 * Transpose, which continues past this section). */
1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate */ const char* inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0404853038489819f, .offset= -175}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_48(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /*
ADDING NODE FOR _elementwiseneuron_48 */ Qnn_Param_t params__elementwiseneuron_48[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_48[] = { "_vector_field_last_convnext_convnext_0_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_48[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133172683417797f, .offset= -13}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_48", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_48, // Node Params 1, // Num Node Params inputs__elementwiseneuron_48, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_48, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
"_vector_field_last_convnext_convnext_0_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133172683417797f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t
params__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0133172683417797f, .offset= -13}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0064257695339620f, .offset= -144}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static 
ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008073560311459f, .offset= -85}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t 
params__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d[] = { "_vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight", "tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0165826305747032f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0165826305747032f, .offset= -124}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return 
err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate */ const char* inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0165826305747032f, .offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; 
/* ADDING NODE FOR _vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0165826305747032f, 
.offset= -124}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_0_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011530530173331f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_0_gamma), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_Mul_2 */ Qnn_Param_t params__vector_field_last_convnext_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_Mul_2[] = { "tts_ttl_vector_field_last_convnext_convnext_0_gamma", "_vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024080211296678f, .offset= -146}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input 
Tensor Names outputs__vector_field_last_convnext_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_Add */ Qnn_Param_t params__vector_field_last_convnext_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_Add[] = { "_vector_field_last_convnext_convnext_0_Mul_output_0", "_vector_field_last_convnext_convnext_0_Mul_2_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139091769233346f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_0_Add, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names 
outputs__vector_field_last_convnext_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_0_Mul_3 */ Qnn_Param_t params__vector_field_last_convnext_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_0_Mul_3[] = { "_vector_field_last_convnext_convnext_0_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_last_convnext_convnext_0_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139091769233346f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_0_Mul_3, // Output 
Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_Mul */ Qnn_Param_t params__vector_field_last_convnext_convnext_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_Mul[] = { "_vector_field_last_convnext_convnext_0_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139091769233346f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_1_Mul, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__vector_field_last_convnext_convnext_1_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Pad */ uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_last_convnext_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Pad[] = { "_vector_field_last_convnext_convnext_1_Mul_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0[] = {1, 196, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139091769233346f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_last_convnext_convnext_1_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { "_vector_field_last_convnext_convnext_1_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139091769233346f, .offset= -123}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = { "_vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139091769233346f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139091769233346f, .offset= -123}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0069035259075463f, .offset= -137}}}, .rank= 4, 
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003597692120820f, .offset= -156}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Conv_2d */ uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_2d_dilation[] = {2}; 
uint32_t _vector_field_last_convnext_convnext_1_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_last_convnext_convnext_1_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_last_convnext_convnext_1_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_2d[] = { "_vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight", "tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055054221302271f, .offset= -150}}}, .rank= 
4, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_last_convnext_convnext_1_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = { "_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055054221302271f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num 
Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate */ const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055054221302271f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err 
= MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = { "_vector_field_last_convnext_convnext_1_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0055054221302271f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_Mul_1 */ Qnn_Param_t params__vector_field_last_convnext_convnext_1_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_Mul_1[] = { "_vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0055054221302271f, .offset= -150}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_1_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042060902342200f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight), 
.dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013579609803855f, .offset= -148}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization */ uint32_t dimensions__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t 
params__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization[] = { "_vector_field_last_convnext_convnext_1_norm_Transpose_output_0", "tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight", "tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0292171966284513f, 
.offset= -83}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = { "_vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0292171966284513f, .offset= -83}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0292171966284513f, .offset= -83}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); 
return err;
}

// Transpose (0,2,3,1): NCHW -> NHWC layout change feeding pwconv1's Conv2d,
// which the qti.aisw backend expects in channel-last order.
// Output shape {1, 1, 192, 512}; encoding carried through unchanged.
static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */
// perm param: 4 uint32 values {0,2,3,1} (dataSize 16 = 4 * sizeof(uint32_t))
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc",
.type=
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0292171966284513f, .offset= -83}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static weight tensor for pwconv1: HWIO {1,1,512,1024} (1x1 kernel, 512 in,
// 1024 out channels), uint8 asymmetric per-tensor quantization; payload comes
// from the linked binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight[] = {1, 1, 512, 1024};
VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0089833931997418f, .offset= -113}}},
.rank= 4,
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023580291308463f, .offset= -218}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_dilation[] 
// Conv2d parameter payloads: dilation {1,1} and stride {1,1} are 2 uint32s
// (dataSize 8); pad_amount is a 2x2 matrix of zeros (dataSize 16).
= {2};
uint32_t _vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_stride[] = {2};
uint32_t _vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_dilation",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_dilation,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_dilation, .dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_pad_amount,
.memType=
QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_pad_amount, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="stride",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_stride",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_stride,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv1_Conv_2d_stride, .dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
// scalar params: ordinary (non-grouped, non-sparse) convolution
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="group",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="reuse_sparse_indices",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
// Inputs: NHWC activation, static weight, static bias (declared by the
// addTensor_* functions above).
const char* inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d[] = {
"_vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight",
"tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias"
};
// Output: NHWC {1, 1, 192, 1024} — channel count expands 512 -> 1024.
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
// (tail of pwconv1 Conv2d: output tensor literal + node registration,
//  5 params / 3 inputs / 1 output)
"_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0414182133972645f, .offset= -165}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv1_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Conv2d", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d, // Node Params
                       5, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose (0,3,1,2): NHWC -> NCHW, undoing the layout change made for the
// Conv2d. Output shape {1, 1024, 1, 192}; encoding carried through unchanged.
static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw */
// perm param: 4 uint32 values {0,3,1,2} (dataSize 16)
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm",
.type=
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {
"_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate"
};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0414182133972645f, .offset= -165}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name
                       "qti.aisw", // Package Name
// (tail of the NHWC->NCHW transpose node registration)
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape {1,1024,1,192} -> {1,1024,192}: drops the unit height axis inserted
// for the Conv2d lowering, restoring the original rank-3 layout.
static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate */
const char* inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate[] = {
"_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw"
};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv1_Conv_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0414182133972645f, .offset= -165}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv1_Conv_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node
Params
inputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseNeuron with operation=1, applied to the pwconv1 output
// (this is the converter's lowering of the ConvNeXt activation's Mul_1 output;
// NOTE(review): the exact neuron the uint32 operation code selects is defined
// by the backend's op package — confirm against QnnOpDef / the op package docs).
static ModelError_t addNode__elementwiseneuron_50(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_50 */
Qnn_Param_t params__elementwiseneuron_50[] = {
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="operation",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
const char* inputs__elementwiseneuron_50[] = {
"_vector_field_last_convnext_convnext_1_pwconv1_Conv_output_0"
};
// Output keeps shape {1,1024,192} but gets its own calibrated encoding
// (activation changes the value distribution, so scale/offset differ).
uint32_t dimensions__vector_field_last_convnext_convnext_1_act_Mul_1_output_0[] = {1, 1024, 192};
Qnn_Tensor_t outputs__elementwiseneuron_50[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_act_Mul_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0153221879154444f, .offset= -11}}},
.rank= 3,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_act_Mul_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_elementwiseneuron_50", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseNeuron", // Qnn Node Type
                       params__elementwiseneuron_50, // Node Params
                       1, // Num Node Params
                       inputs__elementwiseneuron_50, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__elementwiseneuron_50, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return
err;
}

// Reshape {1,1024,192} -> {1,1024,1,192}: same unit-height-axis trick as
// pwconv1, preparing the activation output for pwconv2's Conv2d lowering.
static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d */
const char* inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = {
"_vector_field_last_convnext_convnext_1_act_Mul_1_output_0"
};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0153221879154444f, .offset= -11}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose (0,2,3,1): NCHW -> NHWC layout for pwconv2's Conv2d.
// Output shape {1, 1, 192, 1024}; encoding carried through unchanged.
static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err
= MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */
// perm param: 4 uint32 values {0,2,3,1} (dataSize 16)
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {
"_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d"
};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED,
// (tail of the pwconv2 NHWC transpose: output tensor + node registration)
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0153221879154444f, .offset= -11}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Static weight tensor for pwconv2: HWIO {1,1,1024,512} (1x1 kernel, 1024 in,
// 512 out channels — projects back down), uint8 asymmetric quantization,
// payload bound from the model binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0069011650048196f, .offset= -128}}},
.rank= 4,
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= {
.data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Static bias tensor for pwconv2: rank-1 {512}, uint8 asymmetric quantization,
// payload bound from the model binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias[] = {512};
VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007763413595967f, .offset= -78}}},
.rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Conv2d for pwconv2 (1x1 pointwise conv lowered to 2-D): stride 1x1,
// dilation 1x1, zero padding, group=1 — mirrors the pwconv1 Conv2d setup.
static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv2_Conv_2d */
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t
// Conv2d parameter payloads: pad_amount {2,2} of zeros (dataSize 16),
// stride {1,1} (dataSize 8 = 2 * sizeof(uint32_t)).
dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_stride[] = {2};
uint32_t _vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="dilation",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_dilation",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_dilation,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_pad_amount,
.dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="stride",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_stride",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_stride,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
// scalar params: ordinary (non-grouped, non-sparse) convolution
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="group",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR,
.name="reuse_sparse_indices",
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
// Inputs: NHWC activation, static weight, static bias (declared above).
const char* inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d[] = {
"_vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc",
"tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight",
"tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias"
};
// Output: NHWC {1, 1, 192, 512} — channel count contracts 1024 -> 512.
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
// (tail of pwconv2 Conv2d: output tensor literal + node registration,
//  5 params / 3 inputs / 1 output)
QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164848845452070f, .offset= -142}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv2_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Conv2d", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d, // Node Params
                       5, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose (0,3,1,2): NHWC -> NCHW after pwconv2's Conv2d.
// Output shape {1, 512, 1, 192}; encoding carried through unchanged.
static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw */
// perm param: 4 uint32 values {0,3,1,2} (dataSize 16)
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR,
.name="perm",
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm",
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
"_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate"
};
uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw",
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164848845452070f, .offset= -142}}},
.rank= 4,
.dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params
                       1, // Num Node Params
inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate */ const char* inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164848845452070f, .offset= -142}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { "_vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0164848845452070f, .offset= -142}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_1_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_1_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_1_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028998237103224f, .offset= 0}}}, .rank= 3, 
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_1_gamma), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_Mul_2 */ Qnn_Param_t params__vector_field_last_convnext_convnext_1_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_Mul_2[] = { "tts_ttl_vector_field_last_convnext_convnext_1_gamma", "_vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0051045003347099f, .offset= -162}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t 
Version "_vector_field_last_convnext_convnext_1_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_1_Mul_2, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_Add */ Qnn_Param_t params__vector_field_last_convnext_convnext_1_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_Add[] = { "_vector_field_last_convnext_convnext_1_Mul_output_0", "_vector_field_last_convnext_convnext_1_Mul_2_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139217590913177f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_last_convnext_convnext_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_1_Add, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_1_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_1_Mul_3 */ Qnn_Param_t params__vector_field_last_convnext_convnext_1_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_1_Mul_3[] = { "_vector_field_last_convnext_convnext_1_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_last_convnext_convnext_1_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_1_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_1_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139217590913177f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_1_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_1_Mul_3", // Node Name 
"qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_1_Mul_3, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_1_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_1_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_Mul */ Qnn_Param_t params__vector_field_last_convnext_convnext_2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_Mul[] = { "_vector_field_last_convnext_convnext_1_Mul_3_output_0", "latent_mask" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_Mul_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139217590913177f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type 
params__vector_field_last_convnext_convnext_2_Mul, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Pad */ uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Pad[] = { "_vector_field_last_convnext_convnext_2_Mul_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0[] = {1, 196, 512}; 
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139217590913177f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__vector_field_last_convnext_convnext_2_dwconv_Pad, // Node Params 2, // Num Node Params inputs__vector_field_last_convnext_convnext_2_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf */ uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { "_vector_field_last_convnext_convnext_2_dwconv_Pad_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 512, 196}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139217590913177f, .offset= -122}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // 
Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d */ const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = { "_vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139217590913177f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params 
inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { "_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512}; Qnn_Tensor_t 
outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0139217590913177f, .offset= -122}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight[] = {1, 5, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047394745051861f, .offset= -140}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008531069615856f, .offset= -48}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_2d(QnnModel& model){ 
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Conv_2d */ uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_2d_dilation[] = {2}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_2d_stride[] = {2}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, 
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_2d[] = { "_vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight", "tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
/* NOTE(review): machine-generated by qnn-onnx-converter — do not hand-edit logic; regenerate instead.
 * This span (1) finishes registering the ConvNeXt-2 depthwise conv node ("DepthWiseConv2d",
 * 3 tensor params dilation/pad_amount/stride, all identity values {1,1}/{0,0,0,0}/{1,1}),
 * whose u8 output {1,1,192,512} is quantized with scale 0.00449/offset -41;
 * (2) adds a Transpose with perm {0,3,1,2} (channel-last -> channel-first, out {1,512,1,192});
 * (3) adds a Reshape dropping the unit height axis back to rank-3 {1,512,192}.
 * The layout-only ops carry the conv's quant encoding through unchanged. */
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044947355054319f, .offset= -41}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__vector_field_last_convnext_convnext_2_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { "_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044947355054319f, .offset= -41}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params
inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate */ const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044947355054319f, .offset= -41}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names
/* NOTE(review): generated code. Closes the preceding Reshape node, then adds
 * "_..._dwconv_Conv_output_0_nfc": a Transpose with perm {0,2,1} that flips the
 * rank-3 activation from {1,512,192} (channel-first) to {1,192,512} (feature-last),
 * preserving the dwconv quant encoding (scale 0.00449, offset -41). */
outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc */ uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { "_vector_field_last_convnext_convnext_2_dwconv_Conv_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044947355054319f, .offset= -41}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_Mul_1 */ Qnn_Param_t params__vector_field_last_convnext_convnext_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_Mul_1[] = { "_vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc", "latent_mask" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_Mul_1[] = { (Qnn_Tensor_t) {
/* NOTE(review): generated code. Finishes the ElementWiseBinary node "_..._Mul_1"
 * (operation code 13; this came from an ONNX Mul of the conv output with the
 * graph input "latent_mask" — presumably zeroing padded positions; confirm the
 * code against the ElementWiseBinary operation enum in QnnOpDef.h). The output
 * tensor is named "_..._norm_Transpose_output_0", i.e. the converter folded the
 * following ONNX Transpose away and wired Mul's result straight into LayerNorm.
 * Then registers the LayerNorm gamma ({512}, u8 scale-offset) and beta ({512})
 * static tensors whose bytes live in the companion .bin via BINVARSTART/BINLEN. */
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044711856171489f, .offset= -40}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__vector_field_last_convnext_convnext_2_Mul_1, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0059287524782121f, .offset= 0}}}, .rank= 1,
.dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015028518391773f, .offset= -106}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization */ uint32_t
/* NOTE(review): generated code. Adds the "LayerNorm" node: normalizes the masked
 * dwconv output {1,192,512} over axis 2 (the 512-wide feature axis), epsilon 1e-6,
 * using the gamma/beta static tensors registered above; output re-quantized to u8
 * with scale 0.03920/offset -93. The tail of this span starts the Transpose node
 * that flips the result back to channel-first (perm {0,2,1}). */
dimensions__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization[] = { "_vector_field_last_convnext_convnext_2_norm_Transpose_output_0", "tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight", "tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0392037108540535f, .offset= -93}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = { "_vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0392037108540535f, .offset= -93}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type
/* NOTE(review): generated code. Finishes the ncf Transpose node, then adds
 * "_..._pwconv1_Conv_reshape_to_2d": a Reshape that re-inserts a unit height axis
 * ({1,512,192} -> {1,512,1,192}) so the 1-D pointwise conv can run as Conv2d,
 * and starts the follow-up Transpose to NHWC (perm {0,2,3,1} -> {1,1,192,512}). */
params__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d */ const char* inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = { "_vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0392037108540535f, .offset= -93}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params
inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
/* NOTE(review): generated code. Finishes the NHWC Transpose node feeding pwconv1,
 * then registers the pwconv1 static tensors: weight {1,1,512,1024} (a 1x1 conv
 * kernel, i.e. the ConvNeXt pointwise expansion 512 -> 1024) and bias {1024},
 * both u8 scale-offset quantized with payloads in the .bin (BINVARSTART/BINLEN). */
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0392037108540535f, .offset= -93}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight[] = {1, 1, 512, 1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat=
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0087382318452001f, .offset= -145}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias[] = {1024}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021015752572566f, .offset= -216}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t
/* NOTE(review): generated code. Adds the pwconv1 "Conv2d" node: 1x1 pointwise
 * conv (dilation {1,1}, pad {0,0,0,0}, stride {1,1}, group=1, reuse_sparse_indices
 * false) over the NHWC input {1,1,192,512}, producing {1,1,192,1024} in u8 with
 * scale 0.04627/offset -193. The tail of the span begins the follow-up NCHW
 * Transpose function, which continues past this region. */
addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv1_Conv_2d */ uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_stride[] = {2}; uint32_t _vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d[] = { "_vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight", "tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias" }; uint32_t
dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0462681949138641f, .offset= -193}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t
params__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = { "_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0462681949138641f, .offset= -193}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate */ const char* inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0462681949138641f, .offset= -193}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_52(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_52 */ Qnn_Param_t params__elementwiseneuron_52[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_52[] = { "_vector_field_last_convnext_convnext_2_pwconv1_Conv_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_act_Mul_1_output_0[] = {1, 1024, 192}; Qnn_Tensor_t outputs__elementwiseneuron_52[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119639933109283f, .offset= -14}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_52", // Node Name 
"qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_52, // Node Params 1, // Num Node Params inputs__elementwiseneuron_52, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_52, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d */ const char* inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = { "_vector_field_last_convnext_convnext_2_act_Mul_1_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119639933109283f, .offset= -14}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 
1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0119639933109283f, .offset= -14}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight[] = {1, 1, 1024, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0124275926500559f, .offset= -149}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015309470472857f, .offset= -156}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d */ uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_stride[] = {2}; uint32_t _vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, 
.offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d[] = { "_vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight", "tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t 
outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152270095422864f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152270095422864f, .offset= -157}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate */ const char* inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152270095422864f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = { "_vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0" 
}; uint32_t dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0152270095422864f, .offset= -157}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_2_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_2_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_2_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_2_gamma", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0032302201725543f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_2_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_2_gamma), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_2_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_last_convnext_convnext_2_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_Mul_2 */ Qnn_Param_t params__vector_field_last_convnext_convnext_2_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_2_Mul_2[] = { "tts_ttl_vector_field_last_convnext_convnext_2_gamma", "_vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_last_convnext_convnext_2_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_2_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0064350394532084f, .offset= -120}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_2_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
/* NOTE(review): auto-generated QNN graph-construction code (qnn-onnx-converter,
   export_format=cpp). Do not hand-edit values here — regenerate from the ONNX
   model instead. Comments below were added for readability only. */
/* Tail of the node function for _vector_field_last_convnext_convnext_2_Mul_2:
   remaining output-tensor fields, then registration of the node. */
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_2_Mul_2", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_2_Mul_2, // Node Params
                       1, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_2_Mul_2, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_2_Mul_2, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}
/* Registers ElementWiseBinary node "_vector_field_last_convnext_convnext_2_Add":
   combines the two preceding Mul outputs. operation=0 — node name indicates Add;
   the numeric code comes from the converter's ElementWiseBinary enum (see
   QnnOpDef.h) — confirm there. Output: uint8 asymmetric-quantized tensor,
   shape {1, 192, 512}. */
static ModelError_t addNode__vector_field_last_convnext_convnext_2_Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_Add */
  Qnn_Param_t params__vector_field_last_convnext_convnext_2_Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_2_Add[] = {
    "_vector_field_last_convnext_convnext_2_Mul_output_0",
    "_vector_field_last_convnext_convnext_2_Mul_2_output_0"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_2_Add_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_Add[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_2_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        /* per-tensor scale/offset quantization from calibration (min-max, asymmetric) */
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0142247937619686f, .offset= -116}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_2_Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_2_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_last_convnext_convnext_2_Add, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_2_Add, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_2_Add, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers ElementWiseBinary node "_vector_field_last_convnext_convnext_2_Mul_3":
   masks the Add output with input "latent_mask". operation=13 — node name
   indicates Mul; confirm the code against QnnOpDef.h. Output quantization is
   identical to its first input (pass-through encoding). */
static ModelError_t addNode__vector_field_last_convnext_convnext_2_Mul_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_2_Mul_3 */
  Qnn_Param_t params__vector_field_last_convnext_convnext_2_Mul_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_2_Mul_3[] = {
    "_vector_field_last_convnext_convnext_2_Add_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_2_Mul_3_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_2_Mul_3[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_2_Mul_3_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0142247937619686f, .offset= -116}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_2_Mul_3_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= {
          QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_2_Mul_3", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_last_convnext_convnext_2_Mul_3, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_2_Mul_3, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_2_Mul_3, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers ElementWiseBinary node "_vector_field_last_convnext_convnext_3_Mul"
   (operation=13, node name indicates Mul): re-applies "latent_mask" at the start
   of convnext block 3. Function continues in the next chunk. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_Mul */
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_Mul[] = {
    "_vector_field_last_convnext_convnext_2_Mul_3_output_0",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_Mul_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_Mul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_Mul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0142247937619686f, .offset= -116}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
/* Tail of the node function for _vector_field_last_convnext_convnext_3_Mul
   (started in the previous chunk), then its registration. */
.numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_3_Mul", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_3_Mul, // Node Params
                       1, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_3_Mul, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_3_Mul, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}
/* Registers Pad node "_vector_field_last_convnext_convnext_3_dwconv_Pad":
   pads axis 1 by 2 on each side (pad_amount rows are per-axis {before, after}
   pairs: {0,0},{2,2},{0,0}), growing {1,192,512} to {1,196,512} ahead of the
   depthwise conv. scheme=3 — numeric code from the converter's Pad scheme enum
   (see QnnOpDef.h) — confirm which padding mode it selects. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Pad */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_pad_amount[] = {3, 2};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_dwconv_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Pad_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Pad_pad_amount,
                        .dataSize=24}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_dwconv_Pad[] = {
    "_vector_field_last_convnext_convnext_3_Mul_output_0"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0[] = {1, 196, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Pad[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_dwconv_Pad_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0142247937619686f, .offset= -116}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_3_dwconv_Pad", // Node Name
                         "qti.aisw", // Package Name
                         "Pad", // Qnn Node Type
                         params__vector_field_last_convnext_convnext_3_dwconv_Pad, // Node Params
                         2, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_3_dwconv_Pad, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_3_dwconv_Pad, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers Transpose node "..._dwconv_Pad_output_0_ncf": perm {0,2,1} swaps
   the last two axes, {1,196,512} -> {1,512,196} (converter layout adjustment
   before reshaping to a 2D conv input). Function continues in the next chunk. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t
  params__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm,
                        .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Pad_output_0"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 512, 196};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0142247937619686f, .offset= -116}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
/* Tail of the Transpose node function "..._dwconv_Pad_output_0_ncf"
   (started in the previous chunk), then its registration. */
.numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Node Params
                       1, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}
/* Registers Reshape node "..._Conv_reshape_to_2d": inserts a unit height axis,
   {1,512,196} -> {1,512,1,196}, so the 1D depthwise conv can run as Conv2d. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d */
  const char* inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = {1, 512, 1, 196};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0142247937619686f, .offset= -116}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers Transpose node "..._Conv_reshape_to_2d_nhwc": perm {0,2,3,1}
   converts NCHW {1,512,1,196} to NHWC {1,1,196,512} — the layout QNN's
   DepthWiseConv2d consumes. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char*
  inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 196, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0142247937619686f, .offset= -116}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers static weight tensor for the convnext_3 depthwise conv, shape
   {1, 5, 1, 512} (kernel height 1, width 5, 512 channels); data is linked in
   via BINVARSTART/BINLEN. Function continues in the next chunk. */
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight[] = {1, 5, 1, 512};
/* Tail of addTensor for the depthwise-conv weight (started in the previous
   chunk): registers the static uint8-quantized weight blob. */
VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight", // Tensor Name
                         (Qnn_Tensor_t) {
                           .version= QNN_TENSOR_VERSION_2,
                           {.v2= {
                             .id=0,
                             .name= "tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight",
                             .type= QNN_TENSOR_TYPE_STATIC,
                             .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                             .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                {.scaleOffsetEncoding= {.scale= 0.0032836440950632f, .offset= -118}}},
                             .rank= 4,
                             .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             /* weight bytes come from the linked binary blob */
                             {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight),
                                            .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}}
), err);
return err;
}
/* Registers the static bias tensor for the convnext_3 depthwise conv:
   512 uint8-quantized values from the linked binary blob. */
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0018818539101630f, .offset= -152}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias),
                                              .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}
/* Registers DepthWiseConv2d node "..._dwconv_Conv_2d": stride {1,1},
   dilation {1,1}, no extra padding (padding was applied by the explicit Pad
   node above). Consumes the NHWC activation plus the weight/bias tensors
   registered above. Function continues in the next chunk. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Conv_2d */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_2d_stride[] = {2};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_dwconv_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Conv_2d_dilation,
                        .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Conv_2d_pad_amount,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Conv_2d_stride,
                        .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_2d[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight",
    "tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias"
  };
  uint32_t
/* Tail of the DepthWiseConv2d node function (started in the previous chunk):
   output tensor {1,1,192,512} (196 padded positions reduced to 192 by the
   width-5 kernel), then registration. */
dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate[] = {1, 1, 192, 512};
Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_2d[] = {
  (Qnn_Tensor_t) {
    .version= QNN_TENSOR_VERSION_2,
    {.v2= {
      .id=0,
      .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate",
      .type= QNN_TENSOR_TYPE_NATIVE,
      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
      .quantizeParams= { QNN_DEFINITION_DEFINED,
                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0095300879329443f, .offset= -129}}},
      .rank= 4,
      .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate,
      .memType= QNN_TENSORMEMTYPE_RAW,
      {.clientBuf= { .data=nullptr,
                     .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_3_dwconv_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "DepthWiseConv2d", // Qnn Node Type
                       params__vector_field_last_convnext_convnext_3_dwconv_Conv_2d, // Node Params
                       3, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_2d, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}
/* Registers Transpose node "..._Conv_intermediate_nchw": perm {0,3,1,2}
   converts the conv output back from NHWC {1,1,192,512} to NCHW {1,512,1,192}. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t
  params__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char*
  inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0095300879329443f, .offset= -129}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= {
          QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers Reshape node "..._Conv_intermediate": drops the unit height axis,
   {1,512,1,192} -> {1,512,192}, undoing the earlier to-2D reshape.
   Function continues in the next chunk. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate */
  const char*
  inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0[] = {1, 512, 192};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0095300879329443f, .offset= -129}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
/* Tail of the Reshape node function "..._Conv_intermediate" (started in the
   previous chunk), then its registration. */
.numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate, // Output Tensors
                       1// Num Output Tensors
), err);
return err;
}
/* Registers Transpose node "..._Conv_output_0_nfc": perm {0,2,1} swaps the
   last two axes, {1,512,192} -> {1,192,512}, restoring the sequence-major
   layout used by the surrounding elementwise/norm ops. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm,
                        .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char*
  inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Conv_output_0"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0095300879329443f, .offset= -129}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers ElementWiseBinary node "_vector_field_last_convnext_convnext_3_Mul_1"
   (operation=13, node name indicates Mul): masks the conv output with
   "latent_mask". The output tensor is named "..._norm_Transpose_output_0" —
   the converter fused/renamed the adjacent norm Transpose. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_Mul_1 */
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam=
      (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_Mul_1[] = {
    "_vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_output_0[] = {1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_Mul_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_norm_Transpose_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0095300879329443f, .offset= -129}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr,
                       .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_last_convnext_convnext_3_Mul_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_last_convnext_convnext_3_Mul_1, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_last_convnext_convnext_3_Mul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_last_convnext_convnext_3_Mul_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
/* Registers the static LayerNorm gamma (scale) tensor for convnext_3's norm:
   512 uint8-quantized values. Function continues in the next chunk. */
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043446468189359f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias[] = {512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015011089853942f, .offset= -86}}}, .rank= 1, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Adds the LayerNorm node: normalizes over axis 2 (the 512-channel dim of the
// [1,192,512] input), epsilon 1e-6, using the static weight/bias tensors above.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization */
  // "axes" tensor param: single uint32 value {2}.
  uint32_t dimensions__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="axes",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization_axes",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization_axes,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization_axes,
                       .dataSize=4}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="epsilon",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  // Inputs: activation to normalize plus the static weight and bias tensors.
  const char* inputs__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization[] = {
    "_vector_field_last_convnext_convnext_3_norm_Transpose_output_0",
    "tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight",
    "tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias"
  };
  uint32_t
  dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0[] = {1, 192, 512};
  // Output: uint8 activation [1,192,512], scale 0.0408979..., offset -119.
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0408979430794716f, .offset= -119}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization", // Node Name
    "qti.aisw", // Package Name
    "LayerNorm", // Qnn Node Type
    params__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization, // Node Params
    2, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Transposes the LayerNorm output [1,192,512] -> [1,512,192] (perm {0,2,1}),
// i.e. back to the channel-first ("ncf") layout; quantization params pass through.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t
params__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        // 3 x uint32 permutation entries = 12 bytes.
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm,
                       .dataSize=12}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = {
    "_vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 512, 192};
  // Output keeps the producer's quantization (scale 0.0408979..., offset -119).
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0408979430794716f, .offset= -119}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= {
            QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params
    1, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Reshapes [1,512,192] -> [1,512,1,192] so the pointwise conv1 (1x1 Conv)
// can run as a rank-4 Conv2d; no params, quantization passes through.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d */
  const char* inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = {
    "_vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 512, 1, 192};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0408979430794716f, .offset= -119}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Transposes [1,512,1,192] -> [1,1,192,512] (perm {0,2,3,1}) so pwconv1 sees
// NHWC layout; quantization params pass through unchanged.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        // 4 x uint32 permutation entries = 16 bytes.
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm,
                       .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0,
                         .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0408979430794716f, .offset= -119}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Registers the static pwconv1 (1x1 conv, 512 -> 1024 channels) weight tensor;
// shape completed on the next generated line.
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t
dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight[] = {1, 1, 512, 1024};
  // 1x1 conv weights [1,1,512,1024] (HWIO), uint8 scale 0.0090706..., offset -118,
  // payload from the linked binary blob via BINVARSTART/BINLEN.
  VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0090706134214997f, .offset= -118}}},
        .rank= 4,
        .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight),
                       .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Registers the static pwconv1 bias vector (1024 elems, uint8, scale 0.0016637...,
// offset -218), payload from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias[] = {1024};
  VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0016637030057609f, .offset= -218}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= {
            .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias),
            .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Adds the pwconv1 Conv2d node: 1x1 kernel, stride 1, no padding, no dilation,
// group 1 — i.e. a pointwise (fully-connected-per-position) convolution.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv1_Conv_2d */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_stride[] = {2};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_dilation",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_dilation,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_dilation,
                       .dataSize=8}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_pad_amount",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        // 2x2 pad matrix (all zeros), 16 bytes.
        .rank= 2,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_pad_amount,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_pad_amount,
                       .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_stride",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_stride,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv1_Conv_2d_stride,
                       .dataSize=8}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam=
      (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  // Inputs: NHWC activation plus static weight and bias registered above.
  const char* inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d[] = {
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc",
    "tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight",
    "tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 192, 1024};
  // Output: [1,1,192,1024] uint8, scale 0.0316286..., offset -170.
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0316286534070969f, .offset= -170}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_2d", // Node Name
    "qti.aisw", // Package Name
    "Conv2d", // Qnn Node Type
    params__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d, // Node Params
    5, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Transposes the conv output back to NCHW: [1,1,192,1024] -> [1,1024,1,192]
// (perm {0,3,1,2}); quantization params pass through.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR
  _vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm,
                       .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 1024, 1, 192};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
{.scaleOffsetEncoding= {.scale= 0.0316286534070969f, .offset= -170}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params
    1, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Drops the dummy spatial dim: reshapes [1,1024,1,192] -> [1,1024,192],
// recovering the original ONNX pwconv1 output; quantization passes through.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate */
  const char* inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate[] = {
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_output_0[] = {1, 1024, 192};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv1_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0316286534070969f,
                                    .offset= -170}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv1_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// ElementWiseNeuron activation on the pwconv1 output (operation=1; converter
// enum value — node name suggests the ConvNeXt GELU-style act/Mul fusion).
static ModelError_t addNode__elementwiseneuron_54(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _elementwiseneuron_54 */
  Qnn_Param_t params__elementwiseneuron_54[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__elementwiseneuron_54[] = {
    "_vector_field_last_convnext_convnext_3_pwconv1_Conv_output_0"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_act_Mul_1_output_0[] = {1, 1024, 192};
  // Output: [1,1024,192] uint8, scale 0.0111626..., offset -15.
  Qnn_Tensor_t outputs__elementwiseneuron_54[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_act_Mul_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0111626684665680f, .offset= -15}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_act_Mul_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= {
.data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_elementwiseneuron_54", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseNeuron", // Qnn Node Type
    params__elementwiseneuron_54, // Node Params
    1, // Num Node Params
    inputs__elementwiseneuron_54, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__elementwiseneuron_54, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Reshapes the activation [1,1024,192] -> [1,1024,1,192] so the pointwise
// conv2 (1x1 Conv) can run as a rank-4 Conv2d; quantization passes through.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d */
  const char* inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = {
    "_vector_field_last_convnext_convnext_3_act_Mul_1_output_0"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 1024, 1, 192};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0111626684665680f, .offset= -15}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1,
    // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Transposes [1,1024,1,192] -> [1,1,192,1024] (perm {0,2,3,1}) to NHWC layout
// for pwconv2; quantization params pass through.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm,
                       .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  const char*
  inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    "_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d"
  };
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 1024};
  Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0111626684665680f, .offset= -15}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
    1, // Num Node Params
    inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
    1// Num Output Tensors
    ), err);
  return err;
}

// Registers the static pwconv2 (1x1 conv, 1024 -> 512 channels) weight tensor.
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t
  dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight[] = {1, 1, 1024, 512};
VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight", // Tensor Name
    // [1,1,1024,512] uint8 weights, scale 0.0070950..., offset -116, payload
    // from the linked binary blob via BINVARSTART/BINLEN.
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0070950626395643f, .offset= -116}}},
        .rank= 4,
        .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight),
                       .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Registers the static pwconv2 bias vector (512 elems, uint8, scale 0.0015573...,
// offset -226), payload from the binary blob.
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias[] = {512};
  VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
            {.scaleOffsetEncoding= {.scale= 0.0015573635464534f, .offset= -226}}},
        .rank= 1,
        .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias),
                       .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias)}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
    ), err);
  return err;
}

// Adds the pwconv2 Conv2d node (continues past this chunk): 1x1 kernel,
// stride 1, no padding, no dilation, group 1 — a pointwise convolution.
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;

  /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv2_Conv_2d */
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_dilation",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_dilation,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_dilation,
                       .dataSize=8}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_pad_amount",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 2,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_pad_amount,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_pad_amount,
                       .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
        .id=0,
        .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_stride",
        .type= QNN_TENSOR_TYPE_STATIC,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1,
        .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_stride,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv2_Conv_2d_stride,
                       .dataSize=8}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
            .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
/* Continuation of addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d:
   inputs are (activation NHWC, weight, bias); output activation is
   {1,1,192,512} uint8 (scale 0.0553914085, offset -202). */
const char* inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d[] = { "_vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight", "tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0553914085030556f, .offset= -202}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose back to NCHW (perm {0,3,1,2}, 4 x uint32 => dataSize 16);
   output {1,512,1,192}, same quant encoding as the conv output. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = { "_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0553914085030556f, .offset= -202}}}, .rank= 4, .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape {1,512,1,192} -> {1,512,192}: drops the dummy height axis that was
   introduced to run the 1D pointwise conv as Conv2d. Reshape takes no params. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate */
const char* inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate[] = { "_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0553914085030556f, .offset= -202}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose {1,512,192} -> {1,192,512} (perm {0,2,1}, 3 x uint32 => dataSize 12)
   so the channel axis is last for the following elementwise ops. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
/* Continuation: clientBuf of the perm tensor, then output tensor and node
   registration for the nfc Transpose. */
.data=(uint8_t*)_vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = { "_vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0553914085030556f, .offset= -202}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Static ConvNeXt layer-scale gamma: {1,1,512}, uint8
   (scale 0.0028109222, offset 0); payload from the linked blob. */
static ModelError_t addTensor_tts_ttl_vector_field_last_convnext_convnext_3_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_last_convnext_convnext_3_gamma[] = {1, 1, 512}; VALIDATE(model.addTensor("tts_ttl_vector_field_last_convnext_convnext_3_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_last_convnext_convnext_3_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028109222184867f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_tts_ttl_vector_field_last_convnext_convnext_3_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_last_convnext_convnext_3_gamma), .dataSize=BINLEN(tts_ttl_vector_field_last_convnext_convnext_3_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
/* ElementWiseBinary with operation=13 (generated multiply selector):
   gamma * conv output, layer-scale of the ConvNeXt block.
   Output {1,192,512} uint8 (scale 0.0125792781, offset -202). */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_Mul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_Mul_2 */
Qnn_Param_t params__vector_field_last_convnext_convnext_3_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_3_Mul_2[] = { "tts_ttl_vector_field_last_convnext_convnext_3_gamma", "_vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_Mul_2_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0125792780891061f, .offset= -202}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_3_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_last_convnext_convnext_3_Mul_2, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_Mul_2, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* ElementWiseBinary with operation=0 (generated add selector): residual add of
   "_..._Mul_output_0" (produced earlier in the file) and the scaled branch.
   Output {1,192,512} uint8 (scale 0.0143187391, offset -136). */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_Add */
Qnn_Param_t params__vector_field_last_convnext_convnext_3_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__vector_field_last_convnext_convnext_3_Add[] = { "_vector_field_last_convnext_convnext_3_Mul_output_0", "_vector_field_last_convnext_convnext_3_Mul_2_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_Add_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143187390640378f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_3_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_last_convnext_convnext_3_Add, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_Add, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* ElementWiseBinary multiply (operation=13) with the "latent_mask" graph input:
   masks the block output along the sequence axis. Same quant encoding as the
   Add output (mask is presumably 0/1 — encoding reuse implies that; verify). */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_Mul_3 */
Qnn_Param_t params__vector_field_last_convnext_convnext_3_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__vector_field_last_convnext_convnext_3_Mul_3[] = { "_vector_field_last_convnext_convnext_3_Add_output_0", "latent_mask" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_Mul_3_output_0[] = {1, 192, 512}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_Mul_3_output_0", .type=
/* Continuation of the Mul_3 output tensor, then the node registration. */
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143187390640378f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_3_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__vector_field_last_convnext_convnext_3_Mul_3, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_Mul_3, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose {1,192,512} -> {1,512,192} (perm {0,2,1}) to put channels first
   again before the final projection conv. */
static ModelError_t addNode__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf */
uint32_t dimensions__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf_perm[] = {3}; uint32_t _vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf[] = { "_vector_field_last_convnext_convnext_3_Mul_3_output_0" }; uint32_t dimensions__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf[] = {1, 512, 192}; Qnn_Tensor_t outputs__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143187390640378f, .offset= -136}}}, .rank= 3, .dimensions=dimensions__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf, // Node Params
1, // Num Node Params
inputs__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf,
// Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape {1,512,192} -> {1,512,1,192}: re-inserts a dummy height axis so the
   output projection (proj_out_net) can also run as a Conv2d. */
static ModelError_t addNode__vector_field_proj_out_net_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_proj_out_net_Conv_reshape_to_2d */
const char* inputs__vector_field_proj_out_net_Conv_reshape_to_2d[] = { "_vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf" }; uint32_t dimensions__vector_field_proj_out_net_Conv_reshape_to_2d[] = {1, 512, 1, 192}; Qnn_Tensor_t outputs__vector_field_proj_out_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_out_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143187390640378f, .offset= -136}}}, .rank= 4, .dimensions=dimensions__vector_field_proj_out_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_vector_field_proj_out_net_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__vector_field_proj_out_net_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__vector_field_proj_out_net_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* NCHW -> NHWC transpose (perm {0,2,3,1}) feeding the projection Conv2d;
   output {1,1,192,512}, same quant encoding as its input. */
static ModelError_t addNode__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_proj_out_net_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _vector_field_proj_out_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_out_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_proj_out_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc[] = { "_vector_field_proj_out_net_Conv_reshape_to_2d" }; uint32_t dimensions__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 192, 512}; Qnn_Tensor_t outputs__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_out_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143187390640378f, .offset= -136}}}, .rank= 4, .dimensions=dimensions__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_vector_field_proj_out_net_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_ttl_vector_field_proj_out_net_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_ttl_vector_field_proj_out_net_weight[] = {1, 1, 512, 144}; VALIDATE(model.addTensor("tts_ttl_vector_field_proj_out_net_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_ttl_vector_field_proj_out_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0072496598586440f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_tts_ttl_vector_field_proj_out_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_ttl_vector_field_proj_out_net_weight), .dataSize=BINLEN(tts_ttl_vector_field_proj_out_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__vector_field_proj_out_net_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _vector_field_proj_out_net_Conv_2d */ uint32_t dimensions__vector_field_proj_out_net_Conv_2d_dilation[] = {2}; uint32_t 
_vector_field_proj_out_net_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__vector_field_proj_out_net_Conv_2d_pad_amount[] = {2, 2}; uint32_t _vector_field_proj_out_net_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__vector_field_proj_out_net_Conv_2d_stride[] = {2}; uint32_t _vector_field_proj_out_net_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__vector_field_proj_out_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_out_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_proj_out_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_proj_out_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_out_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__vector_field_proj_out_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_proj_out_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 
0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_out_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__vector_field_proj_out_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_vector_field_proj_out_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__vector_field_proj_out_net_Conv_2d[] = { "_vector_field_proj_out_net_Conv_reshape_to_2d_nhwc", "tts_ttl_vector_field_proj_out_net_weight" }; uint32_t dimensions__vector_field_proj_out_net_Conv_intermediate[] = {1, 1, 192, 144}; Qnn_Tensor_t outputs__vector_field_proj_out_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_vector_field_proj_out_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0430774055421352f, .offset= -142}}}, .rank= 4, .dimensions=dimensions__vector_field_proj_out_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_vector_field_proj_out_net_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Conv2d", // Qnn Node Type
                       params__vector_field_proj_out_net_Conv_2d, // Node Params
                       5, // Num Node Params
                       inputs__vector_field_proj_out_net_Conv_2d, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__vector_field_proj_out_net_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Transpose node: permutes the rank-4 Conv2d output
// "_vector_field_proj_out_net_Conv_intermediate" (1,1,192,144) with perm
// {0,3,1,2} into (1,144,1,192). The uint8 scale/offset encoding
// (0.0430774..., -142) is carried over unchanged from the producer tensor.
static ModelError_t addNode__vector_field_proj_out_net_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_out_net_Conv_intermediate_nchw */
  // Static "perm" tensor param: rank-1 with 4 uint32 entries
  // (dataSize = 4 * sizeof(uint32_t) = 16 bytes).
  uint32_t dimensions__vector_field_proj_out_net_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _vector_field_proj_out_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__vector_field_proj_out_net_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_proj_out_net_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         // Integer param tensor: quantization intentionally undefined.
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_proj_out_net_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_proj_out_net_Conv_intermediate_nchw_perm,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_proj_out_net_Conv_intermediate_nchw[] = {
    "_vector_field_proj_out_net_Conv_intermediate"
  };
  uint32_t dimensions__vector_field_proj_out_net_Conv_intermediate_nchw[] = {1, 144, 1, 192};
  Qnn_Tensor_t outputs__vector_field_proj_out_net_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_out_net_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE, // internal tensor, not app-visible
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0430774055421352f, .offset= -142}}},
        .rank= 4,
        .dimensions=dimensions__vector_field_proj_out_net_Conv_intermediate_nchw,
        // Backend allocates the buffer: clientBuf left empty.
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_proj_out_net_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_proj_out_net_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_proj_out_net_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_proj_out_net_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Reshape node: drops the singleton spatial dim, (1,144,1,192) ->
// rank-3 "_vector_field_proj_out_net_Conv_output_0" of shape (1,144,192).
// Reshape takes no node params; quant encoding is unchanged.
static ModelError_t addNode__vector_field_proj_out_net_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_out_net_Conv_intermediate */
  const char* inputs__vector_field_proj_out_net_Conv_intermediate[] = {
    "_vector_field_proj_out_net_Conv_intermediate_nchw"
  };
  uint32_t dimensions__vector_field_proj_out_net_Conv_output_0[] = {1, 144, 192};
  Qnn_Tensor_t outputs__vector_field_proj_out_net_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_out_net_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0430774055421352f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_proj_out_net_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_proj_out_net_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__vector_field_proj_out_net_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_proj_out_net_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Transpose node: swaps the last two axes (perm {0,2,1}), taking
// "_vector_field_proj_out_net_Conv_output_0" (1,144,192) to the
// feature-last layout (1,192,144) expected by the following Mul.
static ModelError_t addNode__vector_field_proj_out_net_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_out_net_Conv_output_0_nfc */
  // Static "perm" param: 3 uint32 entries (dataSize = 3 * 4 = 12 bytes).
  uint32_t dimensions__vector_field_proj_out_net_Conv_output_0_nfc_perm[] = {3};
  uint32_t _vector_field_proj_out_net_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__vector_field_proj_out_net_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_vector_field_proj_out_net_Conv_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__vector_field_proj_out_net_Conv_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_vector_field_proj_out_net_Conv_output_0_nfc_perm,
                        .dataSize=12}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__vector_field_proj_out_net_Conv_output_0_nfc[] = {
    "_vector_field_proj_out_net_Conv_output_0"
  };
  uint32_t dimensions__vector_field_proj_out_net_Conv_output_0_nfc[] = {1, 192, 144};
  Qnn_Tensor_t outputs__vector_field_proj_out_net_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_out_net_Conv_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0430774055421352f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_proj_out_net_Conv_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_proj_out_net_Conv_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__vector_field_proj_out_net_Conv_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_proj_out_net_Conv_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__vector_field_proj_out_net_Conv_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// ElementWiseBinary node for the ONNX Mul "_vector_field_proj_out_Mul":
// multiplies the projection output by the graph input "latent_mask".
// operation=13 matches the MULTIPLY enumerator of ElementWiseBinary in
// QnnOpDef.h (the _Add node below uses 0 = ADD). Output keeps the
// (1,192,144) shape and the producer's quant encoding.
static ModelError_t addNode__vector_field_proj_out_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _vector_field_proj_out_Mul */
  Qnn_Param_t params__vector_field_proj_out_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__vector_field_proj_out_Mul[] = {
    "_vector_field_proj_out_net_Conv_output_0_nfc",
    "latent_mask"
  };
  uint32_t dimensions__vector_field_proj_out_Mul_output_0[] = {1, 192, 144};
  Qnn_Tensor_t outputs__vector_field_proj_out_Mul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_vector_field_proj_out_Mul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0430774055421352f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__vector_field_proj_out_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_vector_field_proj_out_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__vector_field_proj_out_Mul, // Node Params
                         1, // Num Node Params
                         inputs__vector_field_proj_out_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__vector_field_proj_out_Mul, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// ElementWiseBinary (operation=13, multiply) for ONNX node "_Mul":
// scales the masked vector-field output by "_Reciprocal_output_0_nfc"
// (produced elsewhere in this file). Note the output scale
// (0.00430774...) differs from both inputs' encodings — it was
// calibrated for the product's observed range.
static ModelError_t addNode__Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Mul */
  Qnn_Param_t params__Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__Mul[] = {
    "_Reciprocal_output_0_nfc",
    "_vector_field_proj_out_Mul_output_0"
  };
  uint32_t dimensions__Mul_output_0[] = {1, 192, 144};
  Qnn_Tensor_t outputs__Mul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_Mul_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0043077408336103f, .offset= -142}}},
        .rank= 3,
        .dimensions=dimensions__Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__Mul, // Node Params
                         1, // Num Node Params
                         inputs__Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__Mul, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// ElementWiseBinary (operation=0, add) for ONNX node "_Add": Euler-style
// update adding the scaled vector field "_Mul_output_0" to the graph
// input "noisy_latent". The output reuses noisy_latent's encoding
// (scale 0.0336016..., offset -126 — see addTensor_noisy_latent).
static ModelError_t addNode__Add(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Add */
  Qnn_Param_t params__Add[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__Add[] = {
    "noisy_latent",
    "_Mul_output_0"
  };
  uint32_t dimensions__Add_output_0[] = {1, 192, 144};
  Qnn_Tensor_t outputs__Add[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_Add_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0336016789078712f, .offset= -126}}},
        .rank= 3,
        .dimensions=dimensions__Add_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_Add", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__Add, // Node Params
                         1, // Num Node Params
                         inputs__Add, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__Add, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Final ElementWiseBinary (operation=13, multiply) for ONNX node "_Mul_1":
// re-applies "latent_mask" to the updated latent and writes the graph
// output "denoised_latent" (QNN_TENSOR_TYPE_APP_READ, i.e. readable by
// the application), shape (1,192,144), uint8 scale 0.0323383...,
// offset -130.
static ModelError_t addNode__Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _Mul_1 */
  Qnn_Param_t params__Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__Mul_1[] = {
    "_Add_output_0",
    "latent_mask"
  };
  uint32_t dimensions_denoised_latent[] = {1, 192, 144};
  Qnn_Tensor_t outputs__Mul_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "denoised_latent",
        .type= QNN_TENSOR_TYPE_APP_READ, // graph output tensor
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_8,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0323383510112762f, .offset= -130}}},
        .rank= 3,
        .dimensions=dimensions_denoised_latent,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_Mul_1", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__Mul_1, // Node Params
                         1, // Num Node Params
                         inputs__Mul_1, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__Mul_1, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

// Entry point that builds the "vector_estimator_htp" graph by invoking
// every addTensor_*/addNode_* helper in topological order (continues
// beyond this chunk).
QNN_API ModelError_t QnnModel_composeGraphs(Qnn_BackendHandle_t backendHandle,
                                            QNN_INTERFACE_VER_TYPE interface,
                                            Qnn_ContextHandle_t contextHandle,
                                            const
GraphConfigInfo_t** graphsConfigInfo, const uint32_t numGraphsConfigInfo, GraphInfoPtr_t** graphsInfo, uint32_t* numGraphsInfo, bool debug, QnnLog_Callback_t logCallback, QnnLog_Level_t maxLogLevel) { ModelError_t err = MODEL_NO_ERROR; /* model/graph for vector_estimator_htp*/ QnnModel vector_estimator_htp; const QnnGraph_Config_t** graphConfigs = nullptr; VALIDATE(getQnnGraphConfigFromInfo("vector_estimator_htp", graphsConfigInfo, numGraphsConfigInfo, graphConfigs), err); VALIDATE(vector_estimator_htp.initialize(backendHandle, interface, contextHandle, "vector_estimator_htp", debug, DO_GRAPH_NODE_VALIDATIONS, graphConfigs), err); VALIDATE(addTensor_noisy_latent(vector_estimator_htp), err); VALIDATE(addTensor_text_emb(vector_estimator_htp), err); VALIDATE(addTensor_style_ttl(vector_estimator_htp), err); VALIDATE(addTensor_latent_mask(vector_estimator_htp), err); VALIDATE(addTensor_text_mask(vector_estimator_htp), err); VALIDATE(addTensor_current_step(vector_estimator_htp), err); VALIDATE(addTensor_total_step(vector_estimator_htp), err); VALIDATE(addNode_noisy_latent_ncf(vector_estimator_htp), err); VALIDATE(addNode_style_ttl_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_proj_in_net_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_proj_in_net_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_proj_in_net_weight(vector_estimator_htp), err); VALIDATE(addNode__vector_field_proj_in_net_Conv_2d(vector_estimator_htp), 
err); VALIDATE(addNode__vector_field_proj_in_net_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_proj_in_net_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_proj_in_net_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_key_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_key_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_key_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_value_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_key_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addNode__Reshape(vector_estimator_htp), err); VALIDATE(addNode__Reshape_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_proj_in_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_ReduceSum(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_ReduceSum_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_14(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_15(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3118(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_5_attention_W_value_linear_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_5_attention_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3163(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_11_attention_W_value_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3208(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_17_attention_W_value_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3253(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_23_attention_W_value_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__Div(vector_estimator_htp), err); VALIDATE(addTensor__Reciprocal_coeff(vector_estimator_htp), err); VALIDATE(addNode__Reciprocal(vector_estimator_htp), err); VALIDATE(addNode__Reciprocal_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3102(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_3_attn_W_key_linear_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_3_attn_W_key_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_key_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3103(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_3_attn_W_value_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Reshape_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Transpose_1(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_3_attn_Constant_44_output_0(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Equal_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Split_2(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3147(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_9_attn_W_key_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_key_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_key_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3148(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_9_attn_W_value_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_11_attention_Split_2(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3192(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_15_attn_W_key_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_key_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_key_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3193(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_15_attn_W_value_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Split_2(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3237(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_21_attn_W_key_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_key_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_key_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3238(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_21_attn_W_value_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_value_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_value_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Split_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_sinusoidal_Reshape(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_3_attn_Split_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Split_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Equal(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Unsqueeze_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Split_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Split_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Unsqueeze_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Split_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Split_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Unsqueeze_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Split_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Split_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Unsqueeze_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_5(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_7(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_8(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_9(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Concat_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_7(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_8(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_9(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Concat_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_7(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_8(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_9(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Concat_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_7(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_8(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_9(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Concat_2(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_time_encoder_sinusoidal_Constant_2_output_0(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_sinusoidal_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Concat_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Concat_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Concat_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Concat_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Concat_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Concat_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Concat_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Concat_2(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_time_encoder_sinusoidal_Constant_3_output_0(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_sinusoidal_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_3_attn_Cast_output_0(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Div(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_3_attn_Cast_1_output_0(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Div_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_sinusoidal_Sin(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_sinusoidal_Cos(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Slice_4(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_3_attn_Slice_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Slice_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Slice_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Slice_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Slice_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Slice_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Slice_5(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_3_attn_theta(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_7(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_sinusoidal_Concat(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Sin(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Cos(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Sin_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Cos_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_time_encoder_mlp_0_linear_weight_permute(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_time_encoder_mlp_0_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_mlp_mlp_0_linear_Gemm(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_mlp_mlp_1_Softplus(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_12(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_13(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_12(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_13(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_12(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_13(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_11(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_12(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_13(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_weight(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_mlp_mlp_1_Tanh(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Sub_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Add_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Sub_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Add_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Sub_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Add_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Sub_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Add_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_mlp_mlp_1_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Concat_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Concat_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Concat_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Concat_4(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_time_encoder_mlp_2_linear_weight_permute(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_time_encoder_mlp_2_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_mlp_mlp_2_linear_Gemm(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_3_attn_Transpose(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Transpose(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Transpose(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Transpose(vector_estimator_htp), err); VALIDATE(addNode__vector_field_time_encoder_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_1_Transpose(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_56(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3095(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_1_linear_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_1_linear_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_1_linear_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3140(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_7_linear_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_7_linear_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_7_linear_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3185(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_13_linear_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_13_linear_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_13_linear_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3230(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_19_linear_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_19_linear_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_19_linear_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_1_Transpose_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_7_Transpose(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_13_Transpose(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_19_Transpose(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Pad(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv1_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_1_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_1_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_2_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_2_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_0_convnext_3_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_0_convnext_3_Mul_3_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_1_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_1_Add_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_1_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Pad(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv1_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_8(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_2_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_2_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3101(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_3_attn_W_query_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_query_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Unsqueeze_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Concat(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Slice_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Slice_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Sub(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Add_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Concat_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_MatMul(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_3_attn_Constant_39_output_0(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Div_4(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_3_attn_Constant_42_output_0(vector_estimator_htp), 
err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Where_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Concat_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3110(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_3_attn_out_fc_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_attn_Mul_14(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_3_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_3_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_3_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_10(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_4_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_4_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3116(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_5_attention_W_query_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_W_query_linear_MatMul(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_5_attention_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Concat(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_5_attention_tanh_Tanh_output_0_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Div(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Concat_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3119(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_5_attention_out_fc_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_attention_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_5_norm_norm_weight(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_5_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_5_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_12(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_Add(vector_estimator_htp), 
err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_14(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_1_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_1_Mul_3(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_16(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_2_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_2_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_Mul(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_weight(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_18(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_6_convnext_3_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_6_convnext_3_Mul_3_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_7_Add(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_7_Add_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_7_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_20(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_8_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_8_convnext_0_Mul_3(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_9_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3146(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_9_attn_W_query_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_query_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Unsqueeze_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Concat(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Slice_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Slice_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Sub(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Add_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Concat_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_MatMul(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_9_attn_Div_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Where_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Concat_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3155(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_9_attn_out_fc_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_attn_Mul_14(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_9_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_9_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_9_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_22(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_10_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_10_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3161(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_11_attention_W_query_linear_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_11_attention_W_query_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Concat(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_11_attention_tanh_Tanh_output_0_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Div(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Concat_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3164(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_11_attention_out_fc_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_attention_Mul(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_11_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_11_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_11_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_11_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_norm_norm_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_24(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_norm_norm_LayerNormalization(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_26(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_1_gamma(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_1_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_28(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_2_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_Mul_2(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_2_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_30(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_12_convnext_3_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_Mul_2(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_12_convnext_3_Mul_3_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_13_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_13_Add_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_13_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_weight(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_32(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_14_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_14_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3191(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_15_attn_W_query_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_query_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Unsqueeze_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Concat(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Slice_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Slice_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_4(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Sub(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Add_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Concat_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Div_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Where_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Concat_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3200(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_15_attn_out_fc_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_attn_Mul_14(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_15_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_15_norm_norm_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_15_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_15_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_34(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_16_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_Mul_2(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_16_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3206(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_17_attention_W_query_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_W_query_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Concat(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_17_attention_tanh_Tanh_output_0_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Div(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Concat_3(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_17_attention_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3209(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_17_attention_out_fc_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_attention_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_17_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_17_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_17_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_36(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_38(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_weight(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_1_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_1_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_intermediate(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_40(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_pwconv2_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_2_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_2_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_42(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_18_convnext_3_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_18_convnext_3_Mul_3_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_19_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_19_Add_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_19_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_44(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_20_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_20_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3236(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_21_attn_W_query_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_query_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_2(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_21_attn_Unsqueeze_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Concat(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Slice_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Slice_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_6(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Sub(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Add_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Concat_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Div_4(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Where_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Concat_5(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3245(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_21_attn_out_fc_linear_bias(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_21_attn_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_attn_Mul_14(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_21_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_21_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_21_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_46(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_22_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_22_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_W_query_linear_MatMul_pre_reshape(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3251(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_23_attention_W_query_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_W_query_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_W_query_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Split(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Unsqueeze(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Unsqueeze_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Concat(vector_estimator_htp), err); VALIDATE(addTensor__vector_field_main_blocks_23_attention_tanh_Tanh_output_0_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Div(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_main_blocks_23_attention_Softmax(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Where(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_MatMul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Split_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Concat_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Squeeze(vector_estimator_htp), err); VALIDATE(addTensor_onnx__MatMul_3254(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_23_attention_out_fc_linear_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_out_fc_linear_MatMul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_out_fc_linear_MatMul_post_reshape(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_attention_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_Add(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_23_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_main_blocks_23_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_main_blocks_23_Mul_1(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); 
VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv1_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__elementwiseneuron_48(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_pwconv2_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_0_gamma(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_Mul_2(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_Add(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_0_Mul_3(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_Mul(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Pad(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Pad_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); 
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_dwconv_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_intermediate(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_dwconv_Conv_output_0_nfc(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_Mul_1(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_norm_norm_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_norm_norm_LayerNormalization(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_weight(vector_estimator_htp), err); VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv1_bias(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_2d(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err); VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv1_Conv_intermediate(vector_estimator_htp), err); 
// --- Auto-generated graph assembly (qnn-onnx-converter output). ---
// Ordering is significant: each addTensor_* registers a static weight/bias
// tensor BEFORE the addNode_* call that consumes it, and each node's input
// tensors are produced by earlier calls. Do not reorder.
// Naming suggests ConvNeXt-style blocks (dwconv -> LayerNorm -> pwconv1 ->
// activation -> pwconv2 -> gamma scale -> residual add) — inferred from the
// generated identifiers; verify against the source ONNX model if it matters.

// ConvNeXt block 1: activation clamp + pointwise conv 2 + gamma scale + residual.
VALIDATE(addNode__elementwiseneuron_50(vector_estimator_htp), err);
// pwconv2 lowered as 1x1 Conv2d: reshape to 2-D, permute to NHWC, conv, then restore layout.
VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_pwconv2_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err);
// Learnable per-channel scale (gamma) and residual add closing block 1.
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_1_gamma(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_Mul_2(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_Add(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_1_Mul_3(vector_estimator_htp), err);

// ConvNeXt block 2: masked input, padded depthwise conv (lowered via 2-D reshape + NHWC).
VALIDATE(addNode__vector_field_last_convnext_convnext_2_Mul(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Pad(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Pad_output_0_ncf(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_dwconv_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_dwconv_Conv_output_0_nfc(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_Mul_1(vector_estimator_htp), err);
// LayerNorm (weight + bias params) with layout transpose back to NCF afterwards.
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_norm_norm_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_norm_norm_LayerNormalization(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err);
// pwconv1 (1x1 Conv2d lowering) followed by the generated activation node.
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv1_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv1_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__elementwiseneuron_52(vector_estimator_htp), err);
// pwconv2 + gamma scale + residual add closing block 2.
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_pwconv2_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_2_gamma(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_Mul_2(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_Add(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_2_Mul_3(vector_estimator_htp), err);

// ConvNeXt block 3: same structure as block 2 (mask, padded dwconv, LayerNorm,
// pwconv1 + activation, pwconv2, gamma, residual).
VALIDATE(addNode__vector_field_last_convnext_convnext_3_Mul(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Pad(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Pad_output_0_ncf(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_dwconv_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_dwconv_Conv_output_0_nfc(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_Mul_1(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_norm_norm_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_norm_norm_LayerNormalization(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_norm_Transpose_1_output_0_ncf(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv1_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv1_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__elementwiseneuron_54(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_weight(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_pwconv2_bias(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_pwconv2_Conv_output_0_nfc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_last_convnext_convnext_3_gamma(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_Mul_2(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_Add(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_Mul_3(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_last_convnext_convnext_3_Mul_3_output_0_ncf(vector_estimator_htp), err);

// Output projection: bias-free 1x1 conv (only a weight tensor is registered)
// plus masking multiply, producing the vector-field output.
VALIDATE(addNode__vector_field_proj_out_net_Conv_reshape_to_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_proj_out_net_Conv_reshape_to_2d_nhwc(vector_estimator_htp), err);
VALIDATE(addTensor_tts_ttl_vector_field_proj_out_net_weight(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_proj_out_net_Conv_2d(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_proj_out_net_Conv_intermediate_nchw(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_proj_out_net_Conv_intermediate(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_proj_out_net_Conv_output_0_nfc(vector_estimator_htp), err);
VALIDATE(addNode__vector_field_proj_out_Mul(vector_estimator_htp), err);
// Final top-level ops combining the vector-field output into the denoised
// latent (names suggest a scale/shift step of the diffusion update — inferred
// from the generated identifiers; confirm against the source ONNX graph).
VALIDATE(addNode__Mul(vector_estimator_htp), err);
VALIDATE(addNode__Add(vector_estimator_htp), err);
VALIDATE(addNode__Mul_1(vector_estimator_htp), err);

// Add all models to array to get graphsInfo
// (single graph here; the array form matches the generic template emitted by the converter)
QnnModel* models [] = {&vector_estimator_htp};
uint32_t numModels = 1;

// Populate the constructed graphs in provided output variables
// graphsInfo/numGraphsInfo are out-parameters of the enclosing (generated)
// PREPARE_GRAPHS entry point; ownership passes to the caller, who releases
// it via QnnModel_freeGraphsInfo below.
VALIDATE(getGraphInfoFromModels(*models, numModels, graphsInfo), err);
*numGraphsInfo = numModels;

return err;
} // PREPARE_GRAPHS

// Releases graph metadata previously returned by the PREPARE_GRAPHS entry
// point. Thin extern-"C" shim over the QNN wrapper-API helper so the shared
// library exposes a C-linkage symbol for dlsym-style loading.
QNN_API ModelError_t QnnModel_freeGraphsInfo(GraphInfoPtr_t** graphsInfo, uint32_t numGraphsInfo){
  return qnn_wrapper_api::freeGraphsInfo(graphsInfo, numGraphsInfo);
} // FREEGRAPHINFO

} // extern "C"