diff --git a/SampleApp/CMakeLists.txt b/SampleApp/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e0926079ef5f9eda6abd53ef10a0331738318f64 --- /dev/null +++ b/SampleApp/CMakeLists.txt @@ -0,0 +1,11 @@ +#============================================================================= +# +# Copyright (c) 2022 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# +#============================================================================= + +cmake_minimum_required (VERSION 3.14) +project(SampleApp) +add_subdirectory(src) diff --git a/SampleApp/Makefile b/SampleApp/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d79d6c29ec9a62633054b4217877cfdc1c7f5ceb --- /dev/null +++ b/SampleApp/Makefile @@ -0,0 +1,146 @@ +# +# Copyright (c) 2020-2024 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# + +# define default +default: all + +# define package name +PACKAGE_NAME := $(notdir $(shell pwd)) + +# define library prerequisites list +sample_app := src +make_dir := make +EXE_SOURCES = $(sample_app) + +# define target_architecture +export TARGET_AARCH_VARS:= -march=x86-64 + +# define target name +export TARGET = linux-x86_64 + +# specify compiler +export CXX := clang++ + +.PHONY: all $(EXE_SOURCES) all_x86 all_android hexagon + +all: $(EXE_SOURCES) all_x86 all_android all_linux_oe_aarch64_gcc82 all_linux_oe_aarch64_gcc93 all_linux_oe_aarch64_gcc112 all_ubuntu_aarch64_gcc75 all_ubuntu_aarch64_gcc94 + +# Combined Targets +clean: clean_x86 clean_android clean_qnx + +all_x86: clean_x86 + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.linux-x86_64) + +clean_x86: + @rm -rf bin obj include + +# Android Targets + +all_android: aarch64-android + +aarch64-android: check_ndk clean_aarch64-android + $(call build_if_exists,$(sample_app),$(ANDROID_NDK_ROOT)/ndk-build 
APP_ALLOW_MISSING_DEPS=true APP_ABI="arm64-v8a" NDK_PROJECT_PATH=./ NDK_APPLICATION_MK=$(make_dir)/Application.mk APP_BUILD_SCRIPT=$(make_dir)/Android.mk) + @$(rename_target_dirs) + +clean_android: check_ndk clean_aarch64-android + +clean_aarch64-android: + @rm -rf bin/aarch64-android + @rm -rf obj/local/aarch64-android + +# QNX Target + +all_qnx: check_qnx + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.qnx-aarch64) + +clean_qnx: + @rm -rf bin obj include + +all_linux_oe_aarch64_gcc112: check_linux_oe_aarch64_gcc112 + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.oe-linux-aarch64-gcc11.2) + +clean_linux_oe_aarch64_gcc112: + @rm -rf bin/aarch64-oe-linux-gcc11.2 obj/aarch64-oe-linux-gcc11.2 + +check_linux_oe_aarch64_gcc112: +ifeq ($(QNN_AARCH64_LINUX_OE_GCC_112),) + $(error ERROR: QNN_AARCH64_LINUX_OE_GCC_112 not set, skipping compilation for Linux OE platform.) +endif + + +all_linux_oe_aarch64_gcc93: check_linux_oe_aarch64_gcc93 + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.oe-linux-aarch64-gcc9.3) + +clean_linux_oe_aarch64_gcc93: + @rm -rf bin/aarch64-oe-linux-gcc9.3 obj/aarch64-oe-linux-gcc9.3 + +check_linux_oe_aarch64_gcc93: +ifeq ($(QNN_AARCH64_LINUX_OE_GCC_93),) + $(error ERROR: QNN_AARCH64_LINUX_OE_GCC_93 not set, skipping compilation for Linux OE platform.) +endif + + +all_linux_oe_aarch64_gcc82: check_linux_oe_aarch64_gcc82 + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.oe-linux-aarch64-gcc8.2) + +clean_linux_oe_aarch64_gcc82: + @rm -rf bin/aarch64-oe-linux-gcc8.2 obj/aarch64-oe-linux-gcc8.2 + +check_linux_oe_aarch64_gcc82: +ifeq ($(QNN_AARCH64_LINUX_OE_GCC_82),) + $(error ERROR: QNN_AARCH64_LINUX_OE_GCC_82 not set, skipping compilation for Linux OE platform.) 
+endif + + +all_ubuntu_aarch64_gcc75: check_ubuntu_aarch64_gcc75 + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.ubuntu-aarch64-gcc7.5) + +clean_ubuntu_aarch64_gcc75: + @rm -rf bin/aarch64-ubuntu-gcc7.5 obj/aarch64-ubuntu-gcc7.5 + +check_ubuntu_aarch64_gcc75: +ifeq ($(QNN_AARCH64_UBUNTU_GCC_75),) + $(error ERROR: QNN_AARCH64_UBUNTU_GCC_75 not set, skipping compilation for Ubuntu platform.) +endif + +all_ubuntu_aarch64_gcc94: check_ubuntu_aarch64_gcc94 + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.ubuntu-aarch64-gcc9.4) + +clean_ubuntu_aarch64_gcc94: + @rm -rf bin/aarch64-ubuntu-gcc9.4 obj/aarch64-ubuntu-gcc9.4 + +check_ubuntu_aarch64_gcc94: +ifeq ($(QNN_AARCH64_UBUNTU_GCC_94),) + $(error ERROR: QNN_AARCH64_UBUNTU_GCC_94 not set, skipping compilation for Ubuntu platform.) +endif + + +# utilities +# Syntax: $(call build_if_exists,<dir>,<build command>) +build_if_exists = $(if $(wildcard $(1)),$(2),$(warning WARNING: $(1) does not exist. Skipping Compilation)) +rename_target_dirs = find . -type d -execdir rename 's/arm64-v8a/aarch64-android/' '{}' \+ \ + && mv libs/* bin/ \ + && rm -rf libs \ + +check_ndk: +ifeq ($(ANDROID_NDK_ROOT),) + $(error ERROR: ANDROID_NDK_ROOT not set, skipping compilation for Android platform(s).) +endif + +check_qnx: +ifeq ($(QNX_HOST),) + $(error ERROR: QNX_HOST not set, skipping compilation for QNX platform.) +endif +ifeq ($(QNX_TARGET),) + $(error ERROR: QNX_TARGET not set, skipping compilation for QNX platform.) 
+endif + +# Hexagon Target +hexagon: clean_hexagon + $(call build_if_exists,$(sample_app),-$(MAKE) -f $(make_dir)/Makefile.hexagon) + +clean_hexagon: + @rm -rf bin/hexagon obj/hexagon diff --git a/SampleApp/README.txt b/SampleApp/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..c295d0bf28752c7cd638430d06ca58228d8a0862 --- /dev/null +++ b/SampleApp/README.txt @@ -0,0 +1,2 @@ +For information on how to build and execute qnn-sample-app, +please point your web browser to ${QNN_SDK_ROOT}/docs/QNN/general/sample_app.html. \ No newline at end of file diff --git a/SampleApp/make/Android.mk b/SampleApp/make/Android.mk new file mode 100644 index 0000000000000000000000000000000000000000..c48ec2e493e23d4a0054c21d9aa9180bda8b1edf --- /dev/null +++ b/SampleApp/make/Android.mk @@ -0,0 +1,44 @@ +# ============================================================================== +# +# Copyright (c) 2020, 2022-2024 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. 
+# +# =============================================================== + +LOCAL_PATH := $(call my-dir) +SUPPORTED_TARGET_ABI := arm64-v8a x86 x86_64 + +#============================ Verify Target Info and Application Variables ========================================= +ifneq ($(filter $(TARGET_ARCH_ABI),$(SUPPORTED_TARGET_ABI)),) + ifneq ($(APP_STL), c++_static) + $(error Unsupported APP_STL: "$(APP_STL)") + endif +else + $(error Unsupported TARGET_ARCH_ABI: '$(TARGET_ARCH_ABI)') +endif + +#============================ Define Common Variables =============================================================== +# Include paths +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../../../../include/QNN +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../src/ +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../src/CachingUtil +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../src/Log +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../src/PAL/include +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../src/Utils +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../src/WrapperUtils +PACKAGE_C_INCLUDES += -I $(LOCAL_PATH)/../include/flatbuffers + +#========================== Define OpPackage Library Build Variables ============================================= +include $(CLEAR_VARS) +LOCAL_C_INCLUDES := $(PACKAGE_C_INCLUDES) +MY_SRC_FILES := $(wildcard $(LOCAL_PATH)/../src/*.cpp) +MY_SRC_FILES += $(wildcard $(LOCAL_PATH)/../src/Log/*.cpp) +MY_SRC_FILES += $(wildcard $(LOCAL_PATH)/../src/PAL/src/linux/*.cpp) +MY_SRC_FILES += $(wildcard $(LOCAL_PATH)/../src/PAL/src/common/*.cpp) +MY_SRC_FILES += $(wildcard $(LOCAL_PATH)/../src/Utils/*.cpp) +MY_SRC_FILES += $(wildcard $(LOCAL_PATH)/../src/WrapperUtils/*.cpp) +LOCAL_MODULE := qnn-sample-app +LOCAL_SRC_FILES := $(subst make/,,$(MY_SRC_FILES)) +LOCAL_LDLIBS := -lGLESv2 -lEGL +include $(BUILD_EXECUTABLE) diff --git a/SampleApp/make/Application.mk b/SampleApp/make/Application.mk new file mode 100644 index 0000000000000000000000000000000000000000..3ff8354f81aeb7642ef805aa2fad3e35ae6e6796 --- 
/dev/null +++ b/SampleApp/make/Application.mk @@ -0,0 +1,13 @@ +# ============================================================================== +# +# Copyright (c) 2020, 2022-2024 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# +# =============================================================== + +APP_ABI := arm64-v8a +APP_STL := c++_static +APP_PLATFORM := android-21 +APP_CPPFLAGS += -std=c++11 -O3 -Wall -Werror -fvisibility=hidden -DQNN_API="__attribute__((visibility(\"default\")))" +APP_LDFLAGS += -lc -lm -ldl diff --git a/SampleApp/make/Makefile.hexagon b/SampleApp/make/Makefile.hexagon new file mode 100644 index 0000000000000000000000000000000000000000..7dbdeb02cb326992d22f41c3d2f17ef7565666b7 --- /dev/null +++ b/SampleApp/make/Makefile.hexagon @@ -0,0 +1,145 @@ +# +# Copyright (c) 2024 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# + +# define relevant directories + +ifndef HEXAGON_SDK_ROOT +$(error "Hexagon SDK environment not set") +endif + +ifndef HEXAGON_TOOLS_ROOT +HEXAGON_TOOLS_ROOT = $(DEFAULT_HEXAGON_TOOLS_ROOT) +endif + +ifndef V +V=v69 +endif + +SRC_DIR := src +SRC_DIR_LOG := src/Log +SRC_DIR_DSP_LOG := src/DspLog +SRC_DIR_PAL_LINUX := src/PAL/src/linux +SRC_DIR_PAL_COMMON := src/PAL/src/common +SRC_DIR_UTILS := src/Utils +SRC_DIR_WRAPPER_UTILS := src/WrapperUtils +QNN_API_INCLUDE := ../../../include/QNN +PAL_INCLUDE := src/PAL/include +QURT_INCLUDE := $(HEXAGON_SDK_ROOT)/rtos/qurt/compute$(V)/include/qurt/ +POSIX_INCLUDE := $(HEXAGON_SDK_ROOT)/rtos/qurt/compute$(V)/include/posix/ +HEXAGON_INCLUDES := $(HEXAGON_SDK_ROOT)/incs +HEXAGON_STDEF_INCLUDES := $(HEXAGON_SDK_ROOT)/incs/stddef + +QNN_TARGET ?= hexagon +export TARGET_DIR := ./bin/$(QNN_TARGET) + +CXX = $(HEXAGON_TOOLS_ROOT)/Tools/bin/hexagon-clang++ + +shared_library := $(TARGET_DIR)/libQnnSampleApp$(V).so +sample_app_libs := libfile.a + +.PHONY: 
sample_app_all +.DEFAULT: sample_app_all +sample_app_all: shared_library + +# Include paths +INCLUDES += -I$(SRC_DIR) -I$(SRC_DIR_LOG) -I$(SRC_DIR_UTILS) -I$(SRC_DIR_WRAPPER_UTILS) -I$(PAL_INCLUDE) -I$(QNN_API_INCLUDE) -I$(QURT_INCLUDE) -I$(POSIX_INCLUDE) -I$(HEXAGON_INCLUDES) -I$(HEXAGON_STDEF_INCLUDES) + +# set compiler flags +# pthread is needed for AIC and HTP-MCP Backend +COMMON_CXXFLAGS = -fPIC -Wall -Werror -fno-exceptions $(INCLUDES) +COMMON_LDFLAGS = -shared -s -fPIC + +ifdef QNN_DEBUG_ENABLE +CXXFLAGS += $(COMMON_CXXFLAGS) -O0 -g -DQNN_API="" +LDFLAGS += $(COMMON_LDFLAGS) +else +CXXFLAGS += $(COMMON_CXXFLAGS) -O3 -g +LDFLAGS += $(COMMON_LDFLAGS) -G0 -Wl +endif + +# define library sources +SOURCES := $(wildcard $(SRC_DIR)/*.cpp) +SOURCES_LOG := src/Log/Logger.cpp +SOURCES_DSP_LOG := src/DspLog/LogUtils.cpp +SOURCES_PAL := src/PAL/src/linux/DynamicLoading.cpp +SOURCES_PAL += $(wildcard $(SRC_DIR_PAL_COMMON)/*.cpp) +SOURCES_UTILS := $(wildcard $(SRC_DIR_UTILS)/*.cpp) +SOURCES_WRAPPER_UTILS := $(wildcard $(SRC_DIR_WRAPPER_UTILS)/*.cpp) + +# define object directory +OBJ_ROOT := obj +OBJ_DIR := obj/$(QNN_TARGET) +OBJ_DIR_LOG := obj/$(QNN_TARGET)/Log/ +OBJ_DIR_DSP_LOG := obj/$(QNN_TARGET)/DspLog/ +OBJ_DIR_PAL := obj/$(QNN_TARGET)/PAL +OBJ_DIR_UTILS := obj/$(QNN_TARGET)/Utils/ +OBJ_DIR_WRAPPER_UTILS := obj/$(QNN_TARGET)/WrapperUtils/ + +# setup object files in object directory +OBJECTS := $(patsubst %.cpp,$(OBJ_DIR)/%.o,$(foreach x,$(SOURCES),$(notdir $(x)))) +OBJECTS_LOG := $(patsubst %.cpp,$(OBJ_DIR_LOG)/%.o,$(foreach x,$(SOURCES_LOG),$(notdir $(x)))) +OBJECTS_DSP_LOG := $(patsubst %.cpp,$(OBJ_DIR_DSP_LOG)/%.o,$(foreach x,$(SOURCES_DSP_LOG),$(notdir $(x)))) +OBJECTS_PAL := $(patsubst %.cpp,$(OBJ_DIR_PAL)/%.o,$(foreach x,$(SOURCES_PAL),$(notdir $(x)))) +OBJECTS_UTILS := $(patsubst %.cpp,$(OBJ_DIR_UTILS)/%.o,$(foreach x,$(SOURCES_UTILS),$(notdir $(x)))) +OBJECTS_WRAPPER_UTILS := $(patsubst %.cpp,$(OBJ_DIR_WRAPPER_UTILS)/%.o,$(foreach 
x,$(SOURCES_WRAPPER_UTILS),$(notdir $(x)))) + +# Rule to make executable +.PHONY: shared_library +shared_library: $(sample_app_libs) $(shared_library) + +# Implicit rule to compile and link object files +$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $^ -o $@ + +$(OBJ_DIR_LOG)/%.o: $(SRC_DIR_LOG)/%.cpp + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $^ -o $@ + +$(OBJ_DIR_DSP_LOG)/%.o: $(SRC_DIR_DSP_LOG)/%.cpp + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_LINUX)/%.cpp + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $^ -o $@ + +$(OBJ_DIR_UTILS)/%.o: $(SRC_DIR_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $^ -o $@ + +$(OBJ_DIR_WRAPPER_UTILS)/%.o: $(SRC_DIR_WRAPPER_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $^ -o $@ + +# set up resources +directories := $(TARGET_DIR) $(OBJ_DIR) $(OBJ_DIR_LOG) $(OBJ_DIR_DSP_LOG) $(OBJ_DIR_PAL) $(OBJ_DIR_UTILS) $(OBJ_DIR_WRAPPER_UTILS) + +$(sample_app_libs): obj/hexagon/QnnSampleApp.o $(OBJECTS_LOG) $(OBJECTS_DSP_LOG) $(OBJECTS_PAL) $(OBJECTS_UTILS) $(OBJECTS_WRAPPER_UTILS) | $(directories) + ar rcs $@ $^ + mv $@ $(TARGET_DIR) + +#Compile +$(shared_library): obj/hexagon/main.o + cp $(HEXAGON_TOOLS_ROOT)/Tools/target/hexagon/lib/$(V)/G0/pic/libc++.so.1 . + cp $(HEXAGON_TOOLS_ROOT)/Tools/target/hexagon/lib/$(V)/G0/pic/libc++.so.1.0 . + cp $(HEXAGON_TOOLS_ROOT)/Tools/target/hexagon/lib/$(V)/G0/pic/libc++abi.so.1 . + cp $(HEXAGON_TOOLS_ROOT)/Tools/target/hexagon/lib/$(V)/G0/pic/libc++abi.so.1.0 . 
+ $(CXX) $(LDFLAGS) $(INCLUDES) -o $@ $< $(TARGET_DIR)/$(sample_app_libs) + rm $(TARGET_DIR)/$(sample_app_libs) + +# rule for object directory resource +$(OBJECTS): | $(OBJ_DIR) +$(OBJECTS_LOG): | $(OBJ_DIR_LOG) +$(OBJECTS_DSP_LOG): | $(OBJ_DIR_DSP_LOG) +$(OBJECTS_PAL): | $(OBJ_DIR_PAL) +$(OBJECTS_UTILS): | $(OBJ_DIR_UTILS) +$(OBJECTS_WRAPPER_UTILS): | $(OBJ_DIR_WRAPPER_UTILS) + +# rule to create directories +$(directories): + mkdir -p $@ + +.PHONY: clean +clean: + rm -rf $(OBJ_ROOT) $(TARGET_DIR) diff --git a/SampleApp/make/Makefile.linux-x86_64 b/SampleApp/make/Makefile.linux-x86_64 new file mode 100644 index 0000000000000000000000000000000000000000..a2a206529ba6812723702578f03f837f1d406948 --- /dev/null +++ b/SampleApp/make/Makefile.linux-x86_64 @@ -0,0 +1,116 @@ +# +# Copyright (c) 2020, 2022-2024 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# + +# define relevant directories +SRC_DIR := src +SRC_DIR_LOG := src/Log +SRC_DIR_PAL_LINUX := src/PAL/src/linux +SRC_DIR_PAL_COMMON := src/PAL/src/common +SRC_DIR_UTILS := src/Utils +SRC_DIR_WRAPPER_UTILS := src/WrapperUtils +QNN_API_INCLUDE := ../../../include/QNN +PAL_INCLUDE := src/PAL/include + +QNN_TARGET ?= x86_64-linux-clang +export TARGET_DIR := ./bin/$(QNN_TARGET) + +qnn-sample-app := $(TARGET_DIR)/qnn-sample-app + +# define target architecture if not previously defined, default is x86 +ifndef TARGET_AARCH_VARS +TARGET_AARCH_VARS:= -march=x86-64 +endif + +.PHONY: sample_app_all +.DEFAULT: sample_app_all +sample_app_all: $(qnn-sample-app) + +# Include paths +INCLUDES += -I$(SRC_DIR) -I$(SRC_DIR_LOG) -I$(SRC_DIR_UTILS) -I$(SRC_DIR_WRAPPER_UTILS) -I$(PAL_INCLUDE) -I$(QNN_API_INCLUDE) + +# set compiler flags +# pthread is needed for AIC and HTP-MCP Backend +COMMON_CXXFLAGS = -std=c++11 -fno-exceptions -fno-rtti -fPIC -Wall -Werror -pg -pthread $(INCLUDES) +COMMON_LDFLAGS = -shared -s -fPIC -pthread + +ifdef QNN_DEBUG_ENABLE +CXXFLAGS += 
$(COMMON_CXXFLAGS) -march=x86-64 -O0 -g -DQNN_API="" +LDFLAGS += $(COMMON_LDFLAGS) +else +CXXFLAGS += $(COMMON_CXXFLAGS) -march=x86-64 -O3 -Wno-write-strings -fvisibility=hidden -DQNN_API="__attribute__((visibility(\"default\")))" +LDFLAGS += $(COMMON_LDFLAGS) -fvisibility=hidden -flto +endif + +# define library sources +SOURCES := $(wildcard $(SRC_DIR)/*.cpp) +SOURCES_LOG := $(wildcard $(SRC_DIR_LOG)/*.cpp) +SOURCES_PAL := $(wildcard $(SRC_DIR_PAL_LINUX)/*.cpp) +SOURCES_PAL += $(wildcard $(SRC_DIR_PAL_COMMON)/*.cpp) +SOURCES_UTILS := $(wildcard $(SRC_DIR_UTILS)/*.cpp) +SOURCES_WRAPPER_UTILS := $(wildcard $(SRC_DIR_WRAPPER_UTILS)/*.cpp) + +# define object directory +OBJ_ROOT := obj +OBJ_DIR := obj/$(QNN_TARGET) +OBJ_DIR_LOG := obj/$(QNN_TARGET)/Log/ +OBJ_DIR_PAL := obj/$(QNN_TARGET)/PAL +OBJ_DIR_UTILS := obj/$(QNN_TARGET)/Utils/ +OBJ_DIR_WRAPPER_UTILS := obj/$(QNN_TARGET)/WrapperUtils/ + +# setup object files in object directory +OBJECTS := $(patsubst %.cpp,$(OBJ_DIR)/%.o,$(foreach x,$(SOURCES),$(notdir $(x)))) +OBJECTS_LOG := $(patsubst %.cpp,$(OBJ_DIR_LOG)/%.o,$(foreach x,$(SOURCES_LOG),$(notdir $(x)))) +OBJECTS_PAL := $(patsubst %.cpp,$(OBJ_DIR_PAL)/%.o,$(foreach x,$(SOURCES_PAL),$(notdir $(x)))) +OBJECTS_UTILS := $(patsubst %.cpp,$(OBJ_DIR_UTILS)/%.o,$(foreach x,$(SOURCES_UTILS),$(notdir $(x)))) +OBJECTS_WRAPPER_UTILS := $(patsubst %.cpp,$(OBJ_DIR_WRAPPER_UTILS)/%.o,$(foreach x,$(SOURCES_WRAPPER_UTILS),$(notdir $(x)))) + +#LIBS=-l/usr/lib/x86_64-linux-gnu/libflatbuffers.a +LIBS=-ldl + +# Rule to make executable +.PHONY: qnn-sample-app +qnn-sample-app: $(qnn-sample-app) + +# Implicit rule to compile and link object files +$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_LOG)/%.o: $(SRC_DIR_LOG)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_LINUX)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_UTILS)/%.o: 
$(SRC_DIR_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_WRAPPER_UTILS)/%.o: $(SRC_DIR_WRAPPER_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +# set up resources +directories := $(TARGET_DIR) $(OBJ_DIR) $(OBJ_DIR_LOG) $(OBJ_DIR_PAL) $(OBJ_DIR_UTILS) $(OBJ_DIR_WRAPPER_UTILS) + +# Compile +$(qnn-sample-app): obj/x86_64-linux-clang/main.o obj/x86_64-linux-clang/QnnSampleApp.o $(OBJECTS_LOG) $(OBJECTS_PAL) $(OBJECTS_UTILS) $(OBJECTS_WRAPPER_UTILS) | $(directories) + $(CXX) $(CXXFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) + +# rule for object directory resource +$(OBJECTS): | $(OBJ_DIR) +$(OBJECTS_LOG): | $(OBJ_DIR_LOG) +$(OBJECTS_PAL): | $(OBJ_DIR_PAL) +$(OBJECTS_UTILS): | $(OBJ_DIR_UTILS) +$(OBJECTS_WRAPPER_UTILS): | $(OBJ_DIR_WRAPPER_UTILS) + +# rule to create directories +$(directories): + mkdir -p $@ + +.PHONY: clean +clean: + rm -rf $(OBJ_ROOT) $(TARGET_DIR) diff --git a/SampleApp/make/Makefile.oe-linux-aarch64-gcc11.2 b/SampleApp/make/Makefile.oe-linux-aarch64-gcc11.2 new file mode 100644 index 0000000000000000000000000000000000000000..a50fc1da68639fb44fc34fecf8635d99b2abc9b6 --- /dev/null +++ b/SampleApp/make/Makefile.oe-linux-aarch64-gcc11.2 @@ -0,0 +1,114 @@ +# +# Copyright (c) 2023 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. 
+# + +# define relevant directories +SRC_DIR := src +SRC_DIR_LOG := src/Log +SRC_DIR_PAL_LINUX := src/PAL/src/linux +SRC_DIR_PAL_COMMON := src/PAL/src/common +SRC_DIR_UTILS := src/Utils +SRC_DIR_WRAPPER_UTILS := src/WrapperUtils +ifeq ($(shell test -d ../../target && echo 0),0) + QNN_API_INCLUDE := ../../include +else + QNN_API_INCLUDE := ../../../include/QNN +endif +PAL_INCLUDE := src/PAL/include + +QNN_TARGET ?= aarch64-oe-linux-gcc11.2 +export TARGET_DIR := ./bin/$(QNN_TARGET) +CXX=$(QNN_AARCH64_LINUX_OE_GCC_112)/sysroots/x86_64-qtisdk-linux/usr/bin/aarch64-oe-linux/aarch64-oe-linux-g++ --sysroot=$(QNN_AARCH64_LINUX_OE_GCC_112)/sysroots/armv8a-oe-linux + +qnn-sample-app := $(TARGET_DIR)/qnn-sample-app + +.PHONY: sample_app_all +.DEFAULT: sample_app_all +sample_app_all: $(qnn-sample-app) + +# Include paths +INCLUDES += -I$(SRC_DIR) -I$(SRC_DIR_LOG) -I$(SRC_DIR_UTILS) -I$(SRC_DIR_WRAPPER_UTILS) -I$(PAL_INCLUDE) -I$(QNN_API_INCLUDE) + +# set compiler flags +COMMON_CXXFLAGS = -ldl -std=gnu++11 -fPIC -Wl,-lstdc++ -Wall -Werror -fno-exceptions -fno-rtti -fPIC -pg $(INCLUDES) +COMMON_LDFLAGS = -shared -s -fPIC + +ifdef QNN_DEBUG_ENABLE +CXXFLAGS += $(COMMON_CXXFLAGS) -g -DQNN_API="" +LDFLAGS += $(COMMON_LDFLAGS) +else +CXXFLAGS += $(COMMON_CXXFLAGS) -Wno-write-strings -fvisibility=hidden -DQNN_API="__attribute__((visibility(\"default\")))" +LDFLAGS += $(COMMON_LDFLAGS) -fvisibility=hidden -flto +endif + +# define library sources +SOURCES := $(wildcard $(SRC_DIR)/*.cpp) +SOURCES_LOG := $(wildcard $(SRC_DIR_LOG)/*.cpp) +SOURCES_PAL := $(wildcard $(SRC_DIR_PAL_LINUX)/*.cpp) +SOURCES_PAL += $(wildcard $(SRC_DIR_PAL_COMMON)/*.cpp) +SOURCES_UTILS := $(wildcard $(SRC_DIR_UTILS)/*.cpp) +SOURCES_WRAPPER_UTILS := $(wildcard $(SRC_DIR_WRAPPER_UTILS)/*.cpp) + +# define object directory +OBJ_ROOT := obj +OBJ_DIR := obj/$(QNN_TARGET) +OBJ_DIR_LOG := obj/$(QNN_TARGET)/Log/ +OBJ_DIR_PAL := obj/$(QNN_TARGET)/PAL +OBJ_DIR_UTILS := obj/$(QNN_TARGET)/Utils/ +OBJ_DIR_WRAPPER_UTILS := 
obj/$(QNN_TARGET)/WrapperUtils/ + +# setup object files in object directory +OBJECTS := $(patsubst %.cpp,$(OBJ_DIR)/%.o,$(foreach x,$(SOURCES),$(notdir $(x)))) +OBJECTS_LOG := $(patsubst %.cpp,$(OBJ_DIR_LOG)/%.o,$(foreach x,$(SOURCES_LOG),$(notdir $(x)))) +OBJECTS_PAL := $(patsubst %.cpp,$(OBJ_DIR_PAL)/%.o,$(foreach x,$(SOURCES_PAL),$(notdir $(x)))) +OBJECTS_UTILS := $(patsubst %.cpp,$(OBJ_DIR_UTILS)/%.o,$(foreach x,$(SOURCES_UTILS),$(notdir $(x)))) +OBJECTS_WRAPPER_UTILS := $(patsubst %.cpp,$(OBJ_DIR_WRAPPER_UTILS)/%.o,$(foreach x,$(SOURCES_WRAPPER_UTILS),$(notdir $(x)))) + +LIBS=-ldl + +# Rule to make executable +.PHONY: qnn-sample-app +qnn-sample-app: $(qnn-sample-app) + +# Implicit rule to compile and link object files +$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_LOG)/%.o: $(SRC_DIR_LOG)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_LINUX)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_UTILS)/%.o: $(SRC_DIR_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_WRAPPER_UTILS)/%.o: $(SRC_DIR_WRAPPER_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +# set up resources +directories := $(TARGET_DIR) $(OBJ_DIR) $(OBJ_DIR_LOG) $(OBJ_DIR_PAL) $(OBJ_DIR_UTILS) $(OBJ_DIR_WRAPPER_UTILS) + +# Compile +$(qnn-sample-app): obj/$(QNN_TARGET)/main.o obj/$(QNN_TARGET)/QnnSampleApp.o $(OBJECTS_LOG) $(OBJECTS_PAL) $(OBJECTS_UTILS) $(OBJECTS_WRAPPER_UTILS) | $(directories) + $(CXX) $(CXXFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) + +# rule for object directory resource +$(OBJECTS): | $(OBJ_DIR) +$(OBJECTS_LOG): | $(OBJ_DIR_LOG) +$(OBJECTS_PAL): | $(OBJ_DIR_PAL) +$(OBJECTS_UTILS): | $(OBJ_DIR_UTILS) +$(OBJECTS_WRAPPER_UTILS): | $(OBJ_DIR_WRAPPER_UTILS) + +# rule to create directories +$(directories): + mkdir -p $@ + +.PHONY: clean +clean: + rm -rf $(OBJ_ROOT) $(TARGET_DIR) diff --git a/SampleApp/make/Makefile.oe-linux-aarch64-gcc8.2 
b/SampleApp/make/Makefile.oe-linux-aarch64-gcc8.2 new file mode 100644 index 0000000000000000000000000000000000000000..97ff641d48699f6f4b5c2c91e10d90ed3c7ddc55 --- /dev/null +++ b/SampleApp/make/Makefile.oe-linux-aarch64-gcc8.2 @@ -0,0 +1,114 @@ +# +# Copyright (c) 2021-2023 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# + +# define relevant directories +SRC_DIR := src +SRC_DIR_LOG := src/Log +SRC_DIR_PAL_LINUX := src/PAL/src/linux +SRC_DIR_PAL_COMMON := src/PAL/src/common +SRC_DIR_UTILS := src/Utils +SRC_DIR_WRAPPER_UTILS := src/WrapperUtils +ifeq ($(shell test -d ../../target && echo 0),0) + QNN_API_INCLUDE := ../../include +else + QNN_API_INCLUDE := ../../../include/QNN +endif +PAL_INCLUDE := src/PAL/include + +QNN_TARGET ?= aarch64-oe-linux-gcc8.2 +export TARGET_DIR := ./bin/$(QNN_TARGET) +CXX=$(QNN_AARCH64_LINUX_OE_GCC_82)/sysroots/x86_64-oesdk-linux/usr/bin/aarch64-oe-linux/aarch64-oe-linux-g++ --sysroot=$(QNN_AARCH64_LINUX_OE_GCC_82)/sysroots/aarch64-oe-linux + +qnn-sample-app := $(TARGET_DIR)/qnn-sample-app + +.PHONY: sample_app_all +.DEFAULT: sample_app_all +sample_app_all: $(qnn-sample-app) + +# Include paths +INCLUDES += -I$(SRC_DIR) -I$(SRC_DIR_LOG) -I$(SRC_DIR_UTILS) -I$(SRC_DIR_WRAPPER_UTILS) -I$(PAL_INCLUDE) -I$(QNN_API_INCLUDE) + +# set compiler flags +COMMON_CXXFLAGS = -ldl -std=gnu++11 -fPIC -Wl,-lstdc++ -Wall -Werror -fno-exceptions -fno-rtti -fPIC -pg $(INCLUDES) +COMMON_LDFLAGS = -shared -s -fPIC + +ifdef QNN_DEBUG_ENABLE +CXXFLAGS += $(COMMON_CXXFLAGS) -g -DQNN_API="" +LDFLAGS += $(COMMON_LDFLAGS) +else +CXXFLAGS += $(COMMON_CXXFLAGS) -Wno-write-strings -fvisibility=hidden -DQNN_API="__attribute__((visibility(\"default\")))" +LDFLAGS += $(COMMON_LDFLAGS) -fvisibility=hidden -flto +endif + +# define library sources +SOURCES := $(wildcard $(SRC_DIR)/*.cpp) +SOURCES_LOG := $(wildcard $(SRC_DIR_LOG)/*.cpp) +SOURCES_PAL := $(wildcard $(SRC_DIR_PAL_LINUX)/*.cpp) +SOURCES_PAL += 
$(wildcard $(SRC_DIR_PAL_COMMON)/*.cpp) +SOURCES_UTILS := $(wildcard $(SRC_DIR_UTILS)/*.cpp) +SOURCES_WRAPPER_UTILS := $(wildcard $(SRC_DIR_WRAPPER_UTILS)/*.cpp) + +# define object directory +OBJ_ROOT := obj +OBJ_DIR := obj/$(QNN_TARGET) +OBJ_DIR_LOG := obj/$(QNN_TARGET)/Log/ +OBJ_DIR_PAL := obj/$(QNN_TARGET)/PAL +OBJ_DIR_UTILS := obj/$(QNN_TARGET)/Utils/ +OBJ_DIR_WRAPPER_UTILS := obj/$(QNN_TARGET)/WrapperUtils/ + +# setup object files in object directory +OBJECTS := $(patsubst %.cpp,$(OBJ_DIR)/%.o,$(foreach x,$(SOURCES),$(notdir $(x)))) +OBJECTS_LOG := $(patsubst %.cpp,$(OBJ_DIR_LOG)/%.o,$(foreach x,$(SOURCES_LOG),$(notdir $(x)))) +OBJECTS_PAL := $(patsubst %.cpp,$(OBJ_DIR_PAL)/%.o,$(foreach x,$(SOURCES_PAL),$(notdir $(x)))) +OBJECTS_UTILS := $(patsubst %.cpp,$(OBJ_DIR_UTILS)/%.o,$(foreach x,$(SOURCES_UTILS),$(notdir $(x)))) +OBJECTS_WRAPPER_UTILS := $(patsubst %.cpp,$(OBJ_DIR_WRAPPER_UTILS)/%.o,$(foreach x,$(SOURCES_WRAPPER_UTILS),$(notdir $(x)))) + +LIBS=-ldl + +# Rule to make executable +.PHONY: qnn-sample-app +qnn-sample-app: $(qnn-sample-app) + +# Implicit rule to compile and link object files +$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_LOG)/%.o: $(SRC_DIR_LOG)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_LINUX)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_UTILS)/%.o: $(SRC_DIR_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_WRAPPER_UTILS)/%.o: $(SRC_DIR_WRAPPER_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +# set up resources +directories := $(TARGET_DIR) $(OBJ_DIR) $(OBJ_DIR_LOG) $(OBJ_DIR_PAL) $(OBJ_DIR_UTILS) $(OBJ_DIR_WRAPPER_UTILS) + +# Compile +$(qnn-sample-app): obj/$(QNN_TARGET)/main.o obj/$(QNN_TARGET)/QnnSampleApp.o $(OBJECTS_LOG) $(OBJECTS_PAL) $(OBJECTS_UTILS) $(OBJECTS_WRAPPER_UTILS) | $(directories) + $(CXX) $(CXXFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) + +# rule for object 
directory resource +$(OBJECTS): | $(OBJ_DIR) +$(OBJECTS_LOG): | $(OBJ_DIR_LOG) +$(OBJECTS_PAL): | $(OBJ_DIR_PAL) +$(OBJECTS_UTILS): | $(OBJ_DIR_UTILS) +$(OBJECTS_WRAPPER_UTILS): | $(OBJ_DIR_WRAPPER_UTILS) + +# rule to create directories +$(directories): + mkdir -p $@ + +.PHONY: clean +clean: + rm -rf $(OBJ_ROOT) $(TARGET_DIR) diff --git a/SampleApp/make/Makefile.oe-linux-aarch64-gcc9.3 b/SampleApp/make/Makefile.oe-linux-aarch64-gcc9.3 new file mode 100644 index 0000000000000000000000000000000000000000..820f2e19339215313d19e7b250104f58af394f54 --- /dev/null +++ b/SampleApp/make/Makefile.oe-linux-aarch64-gcc9.3 @@ -0,0 +1,114 @@ +# +# Copyright (c) 2021-2023 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# + +# define relevant directories +SRC_DIR := src +SRC_DIR_LOG := src/Log +SRC_DIR_PAL_LINUX := src/PAL/src/linux +SRC_DIR_PAL_COMMON := src/PAL/src/common +SRC_DIR_UTILS := src/Utils +SRC_DIR_WRAPPER_UTILS := src/WrapperUtils +ifeq ($(shell test -d ../../target && echo 0),0) + QNN_API_INCLUDE := ../../include +else + QNN_API_INCLUDE := ../../../include/QNN +endif +PAL_INCLUDE := src/PAL/include + +QNN_TARGET ?= aarch64-oe-linux-gcc9.3 +export TARGET_DIR := ./bin/$(QNN_TARGET) +CXX=$(QNN_AARCH64_LINUX_OE_GCC_93)/sysroots/x86_64-oesdk-linux/usr/bin/aarch64-oe-linux/aarch64-oe-linux-g++ --sysroot=$(QNN_AARCH64_LINUX_OE_GCC_93)/sysroots/aarch64-oe-linux + +qnn-sample-app := $(TARGET_DIR)/qnn-sample-app + +.PHONY: sample_app_all +.DEFAULT: sample_app_all +sample_app_all: $(qnn-sample-app) + +# Include paths +INCLUDES += -I$(SRC_DIR) -I$(SRC_DIR_LOG) -I$(SRC_DIR_UTILS) -I$(SRC_DIR_WRAPPER_UTILS) -I$(PAL_INCLUDE) -I$(QNN_API_INCLUDE) + +# set compiler flags +COMMON_CXXFLAGS = -ldl -std=gnu++11 -fPIC -Wl,-lstdc++ -Wall -Werror -fno-exceptions -fno-rtti -fPIC -pg $(INCLUDES) +COMMON_LDFLAGS = -shared -s -fPIC + +ifdef QNN_DEBUG_ENABLE +CXXFLAGS += $(COMMON_CXXFLAGS) -g -DQNN_API="" +LDFLAGS += 
$(COMMON_LDFLAGS) +else +CXXFLAGS += $(COMMON_CXXFLAGS) -Wno-write-strings -fvisibility=hidden -DQNN_API="__attribute__((visibility(\"default\")))" +LDFLAGS += $(COMMON_LDFLAGS) -fvisibility=hidden -flto +endif + +# define library sources +SOURCES := $(wildcard $(SRC_DIR)/*.cpp) +SOURCES_LOG := $(wildcard $(SRC_DIR_LOG)/*.cpp) +SOURCES_PAL := $(wildcard $(SRC_DIR_PAL_LINUX)/*.cpp) +SOURCES_PAL += $(wildcard $(SRC_DIR_PAL_COMMON)/*.cpp) +SOURCES_UTILS := $(wildcard $(SRC_DIR_UTILS)/*.cpp) +SOURCES_WRAPPER_UTILS := $(wildcard $(SRC_DIR_WRAPPER_UTILS)/*.cpp) + +# define object directory +OBJ_ROOT := obj +OBJ_DIR := obj/$(QNN_TARGET) +OBJ_DIR_LOG := obj/$(QNN_TARGET)/Log/ +OBJ_DIR_PAL := obj/$(QNN_TARGET)/PAL +OBJ_DIR_UTILS := obj/$(QNN_TARGET)/Utils/ +OBJ_DIR_WRAPPER_UTILS := obj/$(QNN_TARGET)/WrapperUtils/ + +# setup object files in object directory +OBJECTS := $(patsubst %.cpp,$(OBJ_DIR)/%.o,$(foreach x,$(SOURCES),$(notdir $(x)))) +OBJECTS_LOG := $(patsubst %.cpp,$(OBJ_DIR_LOG)/%.o,$(foreach x,$(SOURCES_LOG),$(notdir $(x)))) +OBJECTS_PAL := $(patsubst %.cpp,$(OBJ_DIR_PAL)/%.o,$(foreach x,$(SOURCES_PAL),$(notdir $(x)))) +OBJECTS_UTILS := $(patsubst %.cpp,$(OBJ_DIR_UTILS)/%.o,$(foreach x,$(SOURCES_UTILS),$(notdir $(x)))) +OBJECTS_WRAPPER_UTILS := $(patsubst %.cpp,$(OBJ_DIR_WRAPPER_UTILS)/%.o,$(foreach x,$(SOURCES_WRAPPER_UTILS),$(notdir $(x)))) + +LIBS=-ldl + +# Rule to make executable +.PHONY: qnn-sample-app +qnn-sample-app: $(qnn-sample-app) + +# Implicit rule to compile and link object files +$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_LOG)/%.o: $(SRC_DIR_LOG)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_LINUX)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_UTILS)/%.o: $(SRC_DIR_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_WRAPPER_UTILS)/%.o: $(SRC_DIR_WRAPPER_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ 
-o $@ + +# set up resources +directories := $(TARGET_DIR) $(OBJ_DIR) $(OBJ_DIR_LOG) $(OBJ_DIR_PAL) $(OBJ_DIR_UTILS) $(OBJ_DIR_WRAPPER_UTILS) + +# Compile +$(qnn-sample-app): obj/$(QNN_TARGET)/main.o obj/$(QNN_TARGET)/QnnSampleApp.o $(OBJECTS_LOG) $(OBJECTS_PAL) $(OBJECTS_UTILS) $(OBJECTS_WRAPPER_UTILS) | $(directories) + $(CXX) $(CXXFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) + +# rule for object directory resource +$(OBJECTS): | $(OBJ_DIR) +$(OBJECTS_LOG): | $(OBJ_DIR_LOG) +$(OBJECTS_PAL): | $(OBJ_DIR_PAL) +$(OBJECTS_UTILS): | $(OBJ_DIR_UTILS) +$(OBJECTS_WRAPPER_UTILS): | $(OBJ_DIR_WRAPPER_UTILS) + +# rule to create directories +$(directories): + mkdir -p $@ + +.PHONY: clean +clean: + rm -rf $(OBJ_ROOT) $(TARGET_DIR) diff --git a/SampleApp/make/Makefile.ubuntu-aarch64-gcc7.5 b/SampleApp/make/Makefile.ubuntu-aarch64-gcc7.5 new file mode 100644 index 0000000000000000000000000000000000000000..b08aecdbf13c4891547eae3379d8374b6fadd88e --- /dev/null +++ b/SampleApp/make/Makefile.ubuntu-aarch64-gcc7.5 @@ -0,0 +1,114 @@ +# +# Copyright (c) 2021-2023 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. 
+# + +# define relevant directories +SRC_DIR := src +SRC_DIR_LOG := src/Log +SRC_DIR_PAL_LINUX := src/PAL/src/linux +SRC_DIR_PAL_COMMON := src/PAL/src/common +SRC_DIR_UTILS := src/Utils +SRC_DIR_WRAPPER_UTILS := src/WrapperUtils +ifeq ($(shell test -d ../../target && echo 0),0) + QNN_API_INCLUDE := ../../include +else + QNN_API_INCLUDE := ../../../include/QNN +endif +PAL_INCLUDE := src/PAL/include + +QNN_TARGET ?= aarch64-ubuntu-gcc7.5 +export TARGET_DIR := ./bin/$(QNN_TARGET) +CXX=$(QNN_AARCH64_UBUNTU_GCC_75)/root/bin/aarch64-linux-gnu-g++ + +qnn-sample-app := $(TARGET_DIR)/qnn-sample-app + +.PHONY: sample_app_all +.DEFAULT: sample_app_all +sample_app_all: $(qnn-sample-app) + +# Include paths +INCLUDES += -I$(SRC_DIR) -I$(SRC_DIR_LOG) -I$(SRC_DIR_UTILS) -I$(SRC_DIR_WRAPPER_UTILS) -I$(PAL_INCLUDE) -I$(QNN_API_INCLUDE) + +# set compiler flags +COMMON_CXXFLAGS = -ldl -std=gnu++11 -fPIC -Wl,-lstdc++ -Wall -Werror -fno-exceptions -fno-rtti -fPIC -pg $(INCLUDES) +COMMON_LDFLAGS = -shared -s -fPIC + +ifdef QNN_DEBUG_ENABLE +CXXFLAGS += $(COMMON_CXXFLAGS) -g -DQNN_API="" +LDFLAGS += $(COMMON_LDFLAGS) +else +CXXFLAGS += $(COMMON_CXXFLAGS) -Wno-write-strings -fvisibility=hidden -DQNN_API="__attribute__((visibility(\"default\")))" +LDFLAGS += $(COMMON_LDFLAGS) -fvisibility=hidden -flto +endif + +# define library sources +SOURCES := $(wildcard $(SRC_DIR)/*.cpp) +SOURCES_LOG := $(wildcard $(SRC_DIR_LOG)/*.cpp) +SOURCES_PAL := $(wildcard $(SRC_DIR_PAL_LINUX)/*.cpp) +SOURCES_PAL += $(wildcard $(SRC_DIR_PAL_COMMON)/*.cpp) +SOURCES_UTILS := $(wildcard $(SRC_DIR_UTILS)/*.cpp) +SOURCES_WRAPPER_UTILS := $(wildcard $(SRC_DIR_WRAPPER_UTILS)/*.cpp) + +# define object directory +OBJ_ROOT := obj +OBJ_DIR := obj/$(QNN_TARGET) +OBJ_DIR_LOG := obj/$(QNN_TARGET)/Log/ +OBJ_DIR_PAL := obj/$(QNN_TARGET)/PAL +OBJ_DIR_UTILS := obj/$(QNN_TARGET)/Utils/ +OBJ_DIR_WRAPPER_UTILS := obj/$(QNN_TARGET)/WrapperUtils/ + +# setup object files in object directory +OBJECTS := $(patsubst 
%.cpp,$(OBJ_DIR)/%.o,$(foreach x,$(SOURCES),$(notdir $(x)))) +OBJECTS_LOG := $(patsubst %.cpp,$(OBJ_DIR_LOG)/%.o,$(foreach x,$(SOURCES_LOG),$(notdir $(x)))) +OBJECTS_PAL := $(patsubst %.cpp,$(OBJ_DIR_PAL)/%.o,$(foreach x,$(SOURCES_PAL),$(notdir $(x)))) +OBJECTS_UTILS := $(patsubst %.cpp,$(OBJ_DIR_UTILS)/%.o,$(foreach x,$(SOURCES_UTILS),$(notdir $(x)))) +OBJECTS_WRAPPER_UTILS := $(patsubst %.cpp,$(OBJ_DIR_WRAPPER_UTILS)/%.o,$(foreach x,$(SOURCES_WRAPPER_UTILS),$(notdir $(x)))) + +LIBS=-ldl + +# Rule to make executable +.PHONY: qnn-sample-app +qnn-sample-app: $(qnn-sample-app) + +# Implicit rule to compile and link object files +$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_LOG)/%.o: $(SRC_DIR_LOG)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_LINUX)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_UTILS)/%.o: $(SRC_DIR_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_WRAPPER_UTILS)/%.o: $(SRC_DIR_WRAPPER_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +# set up resources +directories := $(TARGET_DIR) $(OBJ_DIR) $(OBJ_DIR_LOG) $(OBJ_DIR_PAL) $(OBJ_DIR_UTILS) $(OBJ_DIR_WRAPPER_UTILS) + +# Compile +$(qnn-sample-app): obj/$(QNN_TARGET)/main.o obj/$(QNN_TARGET)/QnnSampleApp.o $(OBJECTS_LOG) $(OBJECTS_PAL) $(OBJECTS_UTILS) $(OBJECTS_WRAPPER_UTILS) | $(directories) + $(CXX) $(CXXFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) + +# rule for object directory resource +$(OBJECTS): | $(OBJ_DIR) +$(OBJECTS_LOG): | $(OBJ_DIR_LOG) +$(OBJECTS_PAL): | $(OBJ_DIR_PAL) +$(OBJECTS_UTILS): | $(OBJ_DIR_UTILS) +$(OBJECTS_WRAPPER_UTILS): | $(OBJ_DIR_WRAPPER_UTILS) + +# rule to create directories +$(directories): + mkdir -p $@ + +.PHONY: clean +clean: + rm -rf $(OBJ_ROOT) $(TARGET_DIR) diff --git a/SampleApp/make/Makefile.ubuntu-aarch64-gcc9.4 b/SampleApp/make/Makefile.ubuntu-aarch64-gcc9.4 new file mode 100644 index 
0000000000000000000000000000000000000000..2f48c4ab39cfbe19047ebd67ae33b2c04e83946f --- /dev/null +++ b/SampleApp/make/Makefile.ubuntu-aarch64-gcc9.4 @@ -0,0 +1,114 @@ +# +# Copyright (c) 2023 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# + +# define relevant directories +SRC_DIR := src +SRC_DIR_LOG := src/Log +SRC_DIR_PAL_LINUX := src/PAL/src/linux +SRC_DIR_PAL_COMMON := src/PAL/src/common +SRC_DIR_UTILS := src/Utils +SRC_DIR_WRAPPER_UTILS := src/WrapperUtils +ifeq ($(shell test -d ../../target && echo 0),0) + QNN_API_INCLUDE := ../../include +else + QNN_API_INCLUDE := ../../../include/QNN +endif +PAL_INCLUDE := src/PAL/include + +QNN_TARGET ?= aarch64-ubuntu-gcc9.4 +export TARGET_DIR := ./bin/$(QNN_TARGET) +CXX=$(QNN_AARCH64_UBUNTU_GCC_94)/usr/bin/aarch64-linux-gnu-g++ --sysroot=$(QNN_AARCH64_UBUNTU_GCC_94) + +qnn-sample-app := $(TARGET_DIR)/qnn-sample-app + +.PHONY: sample_app_all +.DEFAULT: sample_app_all +sample_app_all: $(qnn-sample-app) + +# Include paths +INCLUDES += -I$(SRC_DIR) -I$(SRC_DIR_LOG) -I$(SRC_DIR_UTILS) -I$(SRC_DIR_WRAPPER_UTILS) -I$(PAL_INCLUDE) -I$(QNN_API_INCLUDE) + +# set compiler flags +COMMON_CXXFLAGS = -ldl -std=gnu++11 -fPIC -Wl,-lstdc++ -Wall -Werror -fno-exceptions -fno-rtti -fPIC -pg $(INCLUDES) +COMMON_LDFLAGS = -shared -s -fPIC + +ifdef QNN_DEBUG_ENABLE +CXXFLAGS += $(COMMON_CXXFLAGS) -g -DQNN_API="" +LDFLAGS += $(COMMON_LDFLAGS) +else +CXXFLAGS += $(COMMON_CXXFLAGS) -Wno-write-strings -fvisibility=hidden -DQNN_API="__attribute__((visibility(\"default\")))" +LDFLAGS += $(COMMON_LDFLAGS) -fvisibility=hidden -flto +endif + +# define library sources +SOURCES := $(wildcard $(SRC_DIR)/*.cpp) +SOURCES_LOG := $(wildcard $(SRC_DIR_LOG)/*.cpp) +SOURCES_PAL := $(wildcard $(SRC_DIR_PAL_LINUX)/*.cpp) +SOURCES_PAL += $(wildcard $(SRC_DIR_PAL_COMMON)/*.cpp) +SOURCES_UTILS := $(wildcard $(SRC_DIR_UTILS)/*.cpp) +SOURCES_WRAPPER_UTILS := $(wildcard $(SRC_DIR_WRAPPER_UTILS)/*.cpp) 
+ +# define object directory +OBJ_ROOT := obj +OBJ_DIR := obj/$(QNN_TARGET) +OBJ_DIR_LOG := obj/$(QNN_TARGET)/Log/ +OBJ_DIR_PAL := obj/$(QNN_TARGET)/PAL +OBJ_DIR_UTILS := obj/$(QNN_TARGET)/Utils/ +OBJ_DIR_WRAPPER_UTILS := obj/$(QNN_TARGET)/WrapperUtils/ + +# setup object files in object directory +OBJECTS := $(patsubst %.cpp,$(OBJ_DIR)/%.o,$(foreach x,$(SOURCES),$(notdir $(x)))) +OBJECTS_LOG := $(patsubst %.cpp,$(OBJ_DIR_LOG)/%.o,$(foreach x,$(SOURCES_LOG),$(notdir $(x)))) +OBJECTS_PAL := $(patsubst %.cpp,$(OBJ_DIR_PAL)/%.o,$(foreach x,$(SOURCES_PAL),$(notdir $(x)))) +OBJECTS_UTILS := $(patsubst %.cpp,$(OBJ_DIR_UTILS)/%.o,$(foreach x,$(SOURCES_UTILS),$(notdir $(x)))) +OBJECTS_WRAPPER_UTILS := $(patsubst %.cpp,$(OBJ_DIR_WRAPPER_UTILS)/%.o,$(foreach x,$(SOURCES_WRAPPER_UTILS),$(notdir $(x)))) + +LIBS=-ldl + +# Rule to make executable +.PHONY: qnn-sample-app +qnn-sample-app: $(qnn-sample-app) + +# Implicit rule to compile and link object files +$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_LOG)/%.o: $(SRC_DIR_LOG)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_LINUX)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_PAL)/%.o: $(SRC_DIR_PAL_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_UTILS)/%.o: $(SRC_DIR_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +$(OBJ_DIR_WRAPPER_UTILS)/%.o: $(SRC_DIR_WRAPPER_UTILS)/%.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +# set up resources +directories := $(TARGET_DIR) $(OBJ_DIR) $(OBJ_DIR_LOG) $(OBJ_DIR_PAL) $(OBJ_DIR_UTILS) $(OBJ_DIR_WRAPPER_UTILS) + +# Compile +$(qnn-sample-app): obj/$(QNN_TARGET)/main.o obj/$(QNN_TARGET)/QnnSampleApp.o $(OBJECTS_LOG) $(OBJECTS_PAL) $(OBJECTS_UTILS) $(OBJECTS_WRAPPER_UTILS) | $(directories) + $(CXX) $(CXXFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) + +# rule for object directory resource +$(OBJECTS): | $(OBJ_DIR) +$(OBJECTS_LOG): | $(OBJ_DIR_LOG) +$(OBJECTS_PAL): | $(OBJ_DIR_PAL) +$(OBJECTS_UTILS): | $(OBJ_DIR_UTILS) 
+$(OBJECTS_WRAPPER_UTILS): | $(OBJ_DIR_WRAPPER_UTILS) + +# rule to create directories +$(directories): + mkdir -p $@ + +.PHONY: clean +clean: + rm -rf $(OBJ_ROOT) $(TARGET_DIR) diff --git a/SampleApp/src/CMakeLists.txt b/SampleApp/src/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..75740834f1247b262e9c7e959eb53acb80abe57e --- /dev/null +++ b/SampleApp/src/CMakeLists.txt @@ -0,0 +1,40 @@ +#============================================================================== +# +# Copyright (c) 2022 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. +# +#============================================================================== + +set(APP "qnn-sample-app") +set(APP_SOURCES "QnnSampleApp.cpp" + "main.cpp" + "Log/Logger.cpp" + "Log/LogUtils.cpp" + "PAL/src/windows/Common.cpp" + "PAL/src/windows/Directory.cpp" + "PAL/src/windows/DynamicLoading.cpp" + "PAL/src/windows/FileOp.cpp" + "PAL/src/windows/Path.cpp" + "PAL/src/common/GetOpt.cpp" + "PAL/src/common/StringOp.cpp" + "Utils/DataUtil.cpp" + "Utils/DynamicLoadUtil.cpp" + "Utils/IOTensor.cpp" + "Utils/QnnSampleAppUtils.cpp" + "WrapperUtils/QnnWrapperUtils.cpp") + +add_executable(${APP} ${APP_SOURCES}) + +target_compile_definitions(${APP} PUBLIC "-DNOMINMAX") +target_link_libraries(${APP} PRIVATE Shlwapi Shell32) +set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd") +set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT /O2 /Ob3") +target_include_directories(${APP} PUBLIC CachingUtil + Log + PAL/include + Utils + WrapperUtils + ${CMAKE_BINARY_DIR} + ../../../../include/QNN + ./) diff --git a/SampleApp/src/DspLog/LogUtils.cpp b/SampleApp/src/DspLog/LogUtils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dc79b1ca78997b8876e1133e2db245b215875650 --- /dev/null +++ b/SampleApp/src/DspLog/LogUtils.cpp @@ -0,0 +1,22 @@ 
+//============================================================================== +// +// Copyright (c) 2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#include "HAP_farf.h" +#include "LogUtils.hpp" + +#define PRINTLEN 1024 + +void qnn::log::utils::logDefaultCallback(const char* fmt, + QnnLog_Level_t level, + uint64_t timestamp, + va_list argp) { + char buffer[PRINTLEN] = ""; + std::lock_guard<std::mutex> lock(sg_logUtilMutex); + vsnprintf(buffer, sizeof(buffer), fmt, argp); + FARF(ALWAYS, "[%x] %s", level, buffer); +} \ No newline at end of file diff --git a/SampleApp/src/Log/LogUtils.cpp b/SampleApp/src/Log/LogUtils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c9171e8fd5a43d0f5322bf4396203090dc2b9c1a --- /dev/null +++ b/SampleApp/src/Log/LogUtils.cpp @@ -0,0 +1,45 @@ +//============================================================================== +// +// Copyright (c) 2020, 2022, 2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc.
+// +//============================================================================== + +#include "LogUtils.hpp" + +void qnn::log::utils::logDefaultCallback(const char* fmt, + QnnLog_Level_t level, + uint64_t timestamp, + va_list argp) { + const char* levelStr = ""; + switch (level) { + case QNN_LOG_LEVEL_ERROR: + levelStr = " ERROR "; + break; + case QNN_LOG_LEVEL_WARN: + levelStr = "WARNING"; + break; + case QNN_LOG_LEVEL_INFO: + levelStr = " INFO "; + break; + case QNN_LOG_LEVEL_DEBUG: + levelStr = " DEBUG "; + break; + case QNN_LOG_LEVEL_VERBOSE: + levelStr = "VERBOSE"; + break; + case QNN_LOG_LEVEL_MAX: + levelStr = "UNKNOWN"; + break; + } + + double ms = (double)timestamp / 1000000.0; + // To avoid interleaved messages + { + std::lock_guard<std::mutex> lock(sg_logUtilMutex); + fprintf(stdout, "%8.1fms [%-7s] ", ms, levelStr); + vfprintf(stdout, fmt, argp); + fprintf(stdout, "\n"); + } +} \ No newline at end of file diff --git a/SampleApp/src/Log/LogUtils.hpp b/SampleApp/src/Log/LogUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..16a77ea54002dac9b21fb601b2a55905d3779844 --- /dev/null +++ b/SampleApp/src/Log/LogUtils.hpp @@ -0,0 +1,29 @@ +//============================================================================== +// +// Copyright (c) 2020, 2022, 2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc.
+// +//============================================================================== + +#pragma once + +#include <stdarg.h> +#include <stdint.h> +#include <stdio.h> +#include <mutex> + +#include "QnnLog.h" + +namespace qnn { +namespace log { +namespace utils { + +// In non-hexagon app stdout is used and for hexagon farf logging is used +void logDefaultCallback(const char* fmt, QnnLog_Level_t level, uint64_t timestamp, va_list argp); + +static std::mutex sg_logUtilMutex; + +} // namespace utils +} // namespace log +} // namespace qnn diff --git a/SampleApp/src/Log/Logger.cpp b/SampleApp/src/Log/Logger.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d5b44958538d4098fef1db1ed1f111f1d3475baf --- /dev/null +++ b/SampleApp/src/Log/Logger.cpp @@ -0,0 +1,105 @@ +//============================================================================= +// +// Copyright (c) 2020-2022, 2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================= + +#include <chrono> +#include <cstdarg> +#include <string> +#include <tuple> + +#include "LogUtils.hpp" +#include "Logger.hpp" + +using namespace qnn::log; + +std::shared_ptr<Logger> Logger::s_logger = nullptr; + +std::mutex Logger::s_logMutex; + +std::shared_ptr<Logger> Logger::createLogger(QnnLog_Callback_t callback, + QnnLog_Level_t maxLevel, + QnnLog_Error_t* status) { + std::lock_guard<std::mutex> lock(s_logMutex); + if ((maxLevel > QNN_LOG_LEVEL_VERBOSE) || (maxLevel == 0)) { + if (status) { + *status = QNN_LOG_ERROR_INVALID_ARGUMENT; + } + return nullptr; + } + if (!s_logger) { + s_logger = std::shared_ptr<Logger>(new (std::nothrow) Logger(callback, maxLevel, status)); + } + *status = QNN_LOG_NO_ERROR; + return s_logger; +} + +Logger::Logger(QnnLog_Callback_t callback, QnnLog_Level_t maxLevel, QnnLog_Error_t* status) + : m_callback(callback), m_maxLevel(maxLevel), m_epoch(getTimestamp()) { + if (!callback) { + m_callback = utils::logDefaultCallback; + } +} + +void
Logger::log(QnnLog_Level_t level, const char* file, long line, const char* fmt, ...) { + if (m_callback) { + if (level > m_maxLevel.load(std::memory_order_seq_cst)) { + return; + } + va_list argp; + va_start(argp, fmt); + std::string logString(fmt); + std::ignore = file; + std::ignore = line; + (*m_callback)(logString.c_str(), level, getTimestamp() - m_epoch, argp); + va_end(argp); + } +} + +uint64_t Logger::getTimestamp() const { + return std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); +} + +std::shared_ptr<::qnn::log::Logger> g_logger{nullptr}; + +bool qnn::log::initializeLogging() { + QnnLog_Level_t logLevel; + QnnLog_Error_t status; +#ifdef QNN_ENABLE_DEBUG + logLevel = QNN_LOG_LEVEL_DEBUG; +#else + logLevel = QNN_LOG_LEVEL_INFO; +#endif + // Default log stream is enabled in Core/Logger component + g_logger = ::qnn::log::Logger::createLogger(nullptr, logLevel, &status); + if (QNN_LOG_NO_ERROR != status || !g_logger) { + return false; + } + return true; +} + +QnnLog_Callback_t qnn::log::getLogCallback() { return g_logger->getLogCallback(); } + +QnnLog_Level_t qnn::log::getLogLevel() { return g_logger->getMaxLevel(); } + +bool qnn::log::isLogInitialized() { + if (g_logger == nullptr) { + return false; + } + return true; +} + +bool qnn::log::setLogLevel(QnnLog_Level_t maxLevel) { + if (!::qnn::log::Logger::isValid() || + !(maxLevel >= QNN_LOG_LEVEL_ERROR && maxLevel <= QNN_LOG_LEVEL_DEBUG)) { + return false; + } + + g_logger->setMaxLevel(maxLevel); + return true; +} diff --git a/SampleApp/src/Log/Logger.hpp b/SampleApp/src/Log/Logger.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f00c6be67a335e875915fa721431b5a471c6b6d2 --- /dev/null +++ b/SampleApp/src/Log/Logger.hpp @@ -0,0 +1,107 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. 
+// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +#include "QnnLog.h" + +#define __FILENAME__ (strrchr(__FILE__, '/') + 1) + +/** + * @brief Log something with the current logger. Always valid to call, though + * it won't do something if no logger has been set. + */ + +#define QNN_LOG_LEVEL(level, fmt, ...) \ + do { \ + auto logger = ::qnn::log::Logger::getLogger(); \ + if (logger) { \ + logger->log(level, __FILENAME__, __LINE__, fmt, ##__VA_ARGS__); \ + } \ + } while (0) + +#define QNN_ERROR(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_ERROR, fmt, ##__VA_ARGS__) + +#define QNN_ERROR_EXIT(fmt, ...) \ + { \ + QNN_ERROR(fmt, ##__VA_ARGS__); \ + exit(EXIT_FAILURE); \ + } + +#define QNN_WARN(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_WARN, fmt, ##__VA_ARGS__) + +#define QNN_INFO(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_INFO, fmt, ##__VA_ARGS__) + +#define QNN_DEBUG(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_DEBUG, fmt, ##__VA_ARGS__) + +#define QNN_VERBOSE(fmt, ...) 
QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, fmt, ##__VA_ARGS__) + +#define QNN_FUNCTION_ENTRY_LOG QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, "Entering %s", __func__) + +#define QNN_FUNCTION_EXIT_LOG QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, "Returning from %s", __func__) + +namespace qnn { +namespace log { + +bool initializeLogging(); + +QnnLog_Callback_t getLogCallback(); + +QnnLog_Level_t getLogLevel(); + +bool isLogInitialized(); + +bool setLogLevel(QnnLog_Level_t maxLevel); + +class Logger final { + public: + Logger(const Logger&) = delete; + Logger& operator=(const Logger&) = delete; + Logger(Logger&&) = delete; + Logger& operator=(Logger&&) = delete; + + void setMaxLevel(QnnLog_Level_t maxLevel) { + m_maxLevel.store(maxLevel, std::memory_order_seq_cst); + } + + QnnLog_Level_t getMaxLevel() { return m_maxLevel.load(std::memory_order_seq_cst); } + + QnnLog_Callback_t getLogCallback() { return m_callback; } + + void log(QnnLog_Level_t level, const char* file, long line, const char* fmt, ...); + + static std::shared_ptr createLogger(QnnLog_Callback_t callback, + QnnLog_Level_t maxLevel, + QnnLog_Error_t* status); + + static bool isValid() { return (s_logger != nullptr); } + + static std::shared_ptr getLogger() { return s_logger; } + + static void reset() { s_logger = nullptr; } + + private: + Logger(QnnLog_Callback_t callback, QnnLog_Level_t maxLevel, QnnLog_Error_t* status); + + uint64_t getTimestamp() const; + + QnnLog_Callback_t m_callback; + std::atomic m_maxLevel; + uint64_t m_epoch; + static std::shared_ptr s_logger; + static std::mutex s_logMutex; +}; + +} // namespace log +} // namespace qnn diff --git a/SampleApp/src/PAL/include/PAL/Debug.hpp b/SampleApp/src/PAL/include/PAL/Debug.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d03331c2684fb5aab74ef042c325eb0b312d2ba7 --- /dev/null +++ b/SampleApp/src/PAL/include/PAL/Debug.hpp @@ -0,0 +1,21 @@ +//============================================================================ +// +// Copyright (c) 
2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================ + +#pragma once + +#define DEBUG_ON 0 + +#if DEBUG_ON +#define DEBUG_MSG(...) \ + { \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + } +#else +#define DEBUG_MSG(...) +#endif diff --git a/SampleApp/src/PAL/include/PAL/Directory.hpp b/SampleApp/src/PAL/include/PAL/Directory.hpp new file mode 100644 index 0000000000000000000000000000000000000000..435d5ee7e543a0d4541f6a4aa2c5392e84d9114e --- /dev/null +++ b/SampleApp/src/PAL/include/PAL/Directory.hpp @@ -0,0 +1,80 @@ +//============================================================================== +// +// Copyright (c) 2008-2014, 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +//--------------------------------------------------------------------------- +/// @file +/// This file includes APIs for directory operations on supported platforms +//--------------------------------------------------------------------------- + +#pragma once + +#include + +#include "PAL/FileOp.hpp" + +namespace pal { +class Directory; +} + +class pal::Directory { + public: + using DirMode = pal::FileOp::FileMode; + //--------------------------------------------------------------------------- + /// @brief + /// Creates a directory in the file system. + /// @param path + /// Name of directory to create. + /// @param dirmode + /// Directory mode + /// @return + /// True if + /// 1. create a directory successfully + /// 2. or directory exist already + /// False otherwise + /// + /// For example: + /// + /// - Create a directory in default. 
+ /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + /// pal::Directory::Create(path, pal::Directory::DirMode::S_DEFAULT_); + /// pal::Directory::Create(path); + /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + /// + /// - Create a directory with specific permission. + /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + /// pal::Directory::Create(path, pal::Directory::DirMode::S_IRWXU_| + /// pal::Directory::DirMode::S_IRWXG_| + /// pal::Directory::DirMode::S_IRWXO_); + /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + /// + /// @note For windows, dirmode is not used. + /// @note For linux, dirmode is used to set the permission of the folder. + //--------------------------------------------------------------------------- + static bool create(const std::string &path, + pal::Directory::DirMode dirmode = pal::Directory::DirMode::S_DEFAULT_); + + //--------------------------------------------------------------------------- + /// @brief + /// Removes the entire directory whether it's empty or not. + /// @param path + /// Name of directory to delete. + /// @return + /// True if the directory was successfully deleted, false otherwise. + //--------------------------------------------------------------------------- + static bool remove(const std::string &path); + + //--------------------------------------------------------------------------- + /// @brief + /// Creates a directory and all parent directories required. + /// @param path + /// Path of directory to create. + /// @return + /// True if the directory was successfully created, false otherwise. 
+ //--------------------------------------------------------------------------- + static bool makePath(const std::string &path); +}; diff --git a/SampleApp/src/PAL/include/PAL/DynamicLoading.hpp b/SampleApp/src/PAL/include/PAL/DynamicLoading.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8a2e18ceee4f8bbbc486dc9884c5668e6a791619 --- /dev/null +++ b/SampleApp/src/PAL/include/PAL/DynamicLoading.hpp @@ -0,0 +1,99 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +//--------------------------------------------------------------------------- +/// @file +/// This file includes APIs for dynamic loading on supported platforms +//--------------------------------------------------------------------------- + +#pragma once + +#include + +namespace pal { +namespace dynamicloading { +// we only support subset of POSIX of dlopen/dlsym/dladdr/dlerror/dlclose +// except the following flags for dlopen, others should be done only +// when we really need them +// DL_NOW is MUST +// DL_LOCAL is enabled if not specified +enum { + DL_NOW = 0x0001, + DL_LOCAL = 0x0002, + DL_GLOBAL = 0x0004, +}; + +// specify this address to distingiush from NULL pointer +#define DL_DEFAULT (void *)(0x4) + +//--------------------------------------------------------------------------- +/// @brief +/// Loads the dynamic shared object +/// @param filename +/// If contains path separators, treat it as relative or absolute pathname +/// or search it for the rule of dynamic linker +/// @param flags +/// - DL_NOW: resolve undefined symbols before return. MUST be specified. +/// - DL_LOCAL: optional, but the default specified. 
Symbols defined in this +/// shared object are not made available to resolve references in subsequently +/// loaded shared objects +/// - DL_GLOBAL: optional, resolve symbol globally +/// @return +/// On success, a non-NULL handle for the loaded library. +/// On error, NULL +//--------------------------------------------------------------------------- +void *dlOpen(const char *filename, int flags); + +//--------------------------------------------------------------------------- +/// @brief +/// Obtain address of a symbol in a shared object or executable +/// @param handle +/// A handle of a dynamic loaded shared object returned by dlopen +/// @param symbol +/// A null-terminated symbol name +/// @return +/// On success, return the address associated with symbol +/// On error, NULL +//--------------------------------------------------------------------------- +void *dlSym(void *handle, const char *symbol); + +//--------------------------------------------------------------------------- +/// @brief +/// Translate the address of a symbol to the path of the belonging shared object +/// @param addr +/// Address of symbol in a shared object +/// @param path +/// Full name of shared object that contains address, usually it is an absolute path +/// @return +/// On success, return a non-zero value +/// On error, return 0 +//--------------------------------------------------------------------------- +int dlAddrToLibName(void *addr, std::string &name); + +//--------------------------------------------------------------------------- +/// @brief +/// Decrements the reference count on the dynamically loaded shared object +/// referred to by handle. If the reference count drops to 0, then the +/// object is unloaded. 
+/// @return +/// On success, 0; on error, a nonzero value +//--------------------------------------------------------------------------- +int dlClose(void *handle); + +//--------------------------------------------------------------------------- +/// @brief +/// Obtain error diagnostic for functions in the dl-family APIs. +/// @return +/// Returns a human-readable, null-terminated string describing the most +/// recent error that occurred from a call to one of the functions in the +/// dl-family APIs. +//--------------------------------------------------------------------------- +char *dlError(void); + +} // namespace dynamicloading +} // namespace pal diff --git a/SampleApp/src/PAL/include/PAL/FileOp.hpp b/SampleApp/src/PAL/include/PAL/FileOp.hpp new file mode 100644 index 0000000000000000000000000000000000000000..95bbfceb7e28ddcb97c858683c2f7376bc1c1292 --- /dev/null +++ b/SampleApp/src/PAL/include/PAL/FileOp.hpp @@ -0,0 +1,239 @@ +//============================================================================== +// +// Copyright (c) 2008-2023 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +//------------------------------------------------------------------------------ +/// @file +/// This file includes APIs for file operations on the supported platforms +//------------------------------------------------------------------------------ + +#pragma once + +#include + +#include +#include +#include + +namespace pal { +class FileOp; +} + +//------------------------------------------------------------------------------ +/// @brief +/// FileOp contains OS Specific file system functionality. 
+//------------------------------------------------------------------------------ +class pal::FileOp { + public: + // enum for symbolic constants mode, strictly follow linux usage + // windows or another OS user should transfer the usage + // ref : http://man7.org/linux/man-pages/man2/open.2.html + enum class FileMode : uint32_t { + S_DEFAULT_ = 0777, + S_IRWXU_ = 0700, + S_IRUSR_ = 0400, + S_IWUSR_ = 0200, + S_IXUSR_ = 0100, + S_IRWXG_ = 0070, + S_IRGRP_ = 0040, + S_IWGRP_ = 0020, + S_IXGRP_ = 0010, + S_IRWXO_ = 0007, + S_IROTH_ = 0004, + S_IWOTH_ = 0002, + S_IXOTH_ = 0001 + }; + + //--------------------------------------------------------------------------- + /// @brief + /// Copies a file from one location to another, overwrites if the + /// destination already exists. + /// @param source + /// File name of the source file. + /// @param target + /// File name of the target file. + /// @return + /// True on success, otherwise false. + //--------------------------------------------------------------------------- + static bool copyOverFile(const std::string &source, const std::string &target); + + //--------------------------------------------------------------------------- + /// @brief + /// Checks whether the file exists or not. + /// @param fileName + /// File name of the source file, including its complete path. + /// @return + /// True on success, otherwise false. + //--------------------------------------------------------------------------- + static bool checkFileExists(const std::string &fileName); + + //--------------------------------------------------------------------------- + /// @brief + /// Renames an existing file. If the file with target name exists, this call + /// overwrites it with the file with source name. + /// @param source + /// Current File name. + /// @param target + /// New name of the file. + /// @param overwrite + /// Flag indicating to overwrite existing file with newName + /// @return + /// True if successful, otherwise false. 
+ /// @warning + /// Does not work if source and target are on different filesystems. + //--------------------------------------------------------------------------- + static bool move(const std::string &source, const std::string &target, bool overwrite); + + //--------------------------------------------------------------------------- + /// @brief + /// Delete an existing file + /// @param fileName + /// File name of the file to be deleted. + /// @return + /// True if successful, otherwise false. + //--------------------------------------------------------------------------- + static bool deleteFile(const std::string &fileName); + + //--------------------------------------------------------------------------- + /// @brief + /// Check if path is a directory or not + /// @param path + /// Path to check + /// @return + /// True if successful, otherwise false. + //--------------------------------------------------------------------------- + static bool checkIsDir(const std::string &path); + + //--------------------------------------------------------------------------- + /// @brief Data type representing parts of a filename + //--------------------------------------------------------------------------- + typedef struct { + //--------------------------------------------------------------------------- + /// @brief Name of the file without the extension (i.e., basename) + //--------------------------------------------------------------------------- + std::string basename; + + //--------------------------------------------------------------------------- + /// @brief Name of the file extension (i.e., .txt or .hlnd, .html) + //--------------------------------------------------------------------------- + std::string extension; + + //--------------------------------------------------------------------------- + /// @brief + /// Location of the file (i.e., /abc/xyz/foo.bar <-- /abc/xyz/). 
+ /// If the file name has no location then the Directory points to + /// empty string + //--------------------------------------------------------------------------- + std::string directory; + } FilenamePartsType_t; + + //--------------------------------------------------------------------------- + /// @brief + /// Determines the components of a given filename, being the directory, + /// basename and extension. If the file has no location or extension, these + /// components remain empty + /// @param filename + /// Path of the file for which the components are to be determined + /// @param filenameParts + /// Will contain the file name components when this function returns + /// @return + /// True if successful, false otherwise + //--------------------------------------------------------------------------- + static bool getFileInfo(const std::string &filename, FilenamePartsType_t &filenameParts); + + //--------------------------------------------------------------------------- + /// @brief + /// Typedef for a vector of FilenamePartsType_t + //--------------------------------------------------------------------------- + typedef std::vector FilenamePartsListType_t; + + //--------------------------------------------------------------------------- + /// @brief + /// Typedef for a vector of FilenamePartsType_t const iterator + //--------------------------------------------------------------------------- + typedef std::vector::const_iterator FilenamePartsListTypeIter_t; + + //--------------------------------------------------------------------------- + /// @brief + /// Returns a vector of FilenamePartsType_t objects for a given directory + /// @param path + /// Path to scan for files + /// @return + /// True if successful, false otherwise + //--------------------------------------------------------------------------- + static bool getFileInfoList(const std::string &path, FilenamePartsListType_t &filenamePartsList); + + 
//--------------------------------------------------------------------------- + /// @brief + /// Returns a vector of FilenamePartsType_t objects for a given directory + /// and the child directories inside. + /// @param path + /// Path to directory to scan for files for + /// @note if path is not a directory - the function will return false + /// @param filenamePartList + /// List to append to + /// @param ignoreDirs + /// If this flag is set to true, directories (and symbolic links to directories) + /// are not included in the list. Only actual files below the specified + /// directory path will be appended. + /// @return True if successful, false otherwise + /// @note Directories in list only populate Directory member variable of the struct. + /// That is Basename and Extension will be empty strings. + /// @note Symbolic links to directories are not followed. This is to avoid possible + /// infinite recursion. However the initial call to this method can have + /// path to be a symbolic link to a directory. If ignoreDirs is true, + /// symbolic links to directories are also ignored. + /// @note The order in which the files/directories are listed is platform + /// dependent. However files inside a directory always come before the + /// directory itself. 
+ //--------------------------------------------------------------------------- + static bool getFileInfoListRecursive(const std::string &path, + FilenamePartsListType_t &filenamePartsList, + const bool ignoreDirs); + + //--------------------------------------------------------------------------- + /// @brief + /// Create an absolute path from the supplied path + /// @param path + /// Path should not contain trailing '/' or '\\' + /// @return + /// Return absolute path without trailing '/' or '\\' + //--------------------------------------------------------------------------- + static std::string getAbsolutePath(const std::string &path); + + //--------------------------------------------------------------------------- + /// @brief Get the file name from a path + //--------------------------------------------------------------------------- + static std::string getFileName(const std::string &file); + + //--------------------------------------------------------------------------- + /// @brief Get the directory path to a file + //--------------------------------------------------------------------------- + static std::string getDirectory(const std::string &file); + + //--------------------------------------------------------------------------- + /// @brief Get the current working directory. + /// @returns The absolute CWD or empty string if the path could not be + /// retrieved (because it was too long or deleted for example). 
+ //--------------------------------------------------------------------------- + static std::string getCurrentWorkingDirectory(); + + //--------------------------------------------------------------------------- + /// @brief Set the current working directory + //--------------------------------------------------------------------------- + static bool setCurrentWorkingDirectory(const std::string &workingDir); + + //--------------------------------------------------------------------------- + /// @brief Returns true if the file contains any extension or false. + //--------------------------------------------------------------------------- + static bool hasFileExtension(const std::string &file); + + //--------------------------------------------------------------------------- + /// @brief Returns full path of file, Directory/Basename(.Extension, if any) + //--------------------------------------------------------------------------- + static std::string partsToString(const FilenamePartsType_t &filenameParts); +}; diff --git a/SampleApp/src/PAL/include/PAL/GetOpt.hpp b/SampleApp/src/PAL/include/PAL/GetOpt.hpp new file mode 100644 index 0000000000000000000000000000000000000000..23a3d10edf1e59157982698748171b061112ebe5 --- /dev/null +++ b/SampleApp/src/PAL/include/PAL/GetOpt.hpp @@ -0,0 +1,93 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +//-------------------------------------------------------------------------------- +/// @file +/// This file includes APIs for the command line parsing on supported platforms +//-------------------------------------------------------------------------------- + +#pragma once + +namespace pal { +// we implement a similar API for POSIX.2 +// so that some global var are necessary + +extern const char *g_optArg; +extern int g_optInd; + +enum { + no_argument = 0, + required_argument = 1, + optional_argument = 2, +}; + +//-------------------------------------------------------------------------------------------------- +/// @brief +/// This structure describes a single long option name for the sake of getopt_long. The argument +/// longopts must be an array of these structures, one for each long option. Terminate the array +/// with an element containing all zeros. +//-------------------------------------------------------------------------------------------------- +struct Option { + //-------------------------------------------------------------------------------------------------- + /// @brief The name of the long option. + //-------------------------------------------------------------------------------------------------- + const char *name; + + //-------------------------------------------------------------------------------------------------- + /// @brief + /// If the option does not take an argument, no_argument (or 0). + /// If the option requires an argument, required_argument (or 1). + //-------------------------------------------------------------------------------------------------- + int hasArg; + + //-------------------------------------------------------------------------------------------------- + /// @brief + /// Specifies how results are returned for a long option. + /// If flag is NULL, then GetOptLongOnly() returns val. 
Otherwise, it returns 0, and flag + /// points to a variable which is set to val if the option is found, but + /// left unchanged if the option is not found. + //-------------------------------------------------------------------------------------------------- + int *flag; + + //-------------------------------------------------------------------------------------------------- + /// @brief + /// The value to return, or to load into the variable pointed to by flag. + /// The last element of the array has to be filled with zeros. + //-------------------------------------------------------------------------------------------------- + int val; +}; + +//-------------------------------------------------------------------------------------------------- +/// @brief +/// This parses command-line options as POSIX getopt_long_only() +/// but we don't support optstring and optonal_argument now +/// @param argc +/// Argument count +/// @param argv +/// Argument array +/// @param optstring +/// Legitimate option characters, short options, don't support now +/// @param longopts +/// A pointer to the first element of an array of struct option, +/// has_arg field in the struct option indicates 3 possibilities, +/// no_argument, required_argument or optional_argument. we don't +/// support optional_argument now +/// @param longindex +/// If longindex is not NULL, it points to a variable which is set +/// to the index of the long option relative to longopts +/// @return +/// -1 for parsing done, '?' 
for non-recognized arguments, 0 for +/// flag in longopts is not NULL and saved the val to it +//-------------------------------------------------------------------------------------------------- +int getOptLongOnly(int argc, + const char *const argv[], + const char *optstring, + const struct Option *longopts, + int *longindex); + +} // namespace pal diff --git a/SampleApp/src/PAL/include/PAL/Path.hpp b/SampleApp/src/PAL/include/PAL/Path.hpp new file mode 100644 index 0000000000000000000000000000000000000000..60b10fe5dada4e56d0c427999d1f1b232ae8eeac --- /dev/null +++ b/SampleApp/src/PAL/include/PAL/Path.hpp @@ -0,0 +1,50 @@ +//============================================================================== +// +// Copyright (c) 2008-2014, 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +//============================================================================== + +//------------------------------------------------------------------------------ +/// @file +/// The file includes APIs for path related operations on supported platforms +//------------------------------------------------------------------------------ + +#pragma once + +#include +#include + +namespace pal { +class Path; +} + +class pal::Path { + public: + //--------------------------------------------------------------------------- + /// @brief Returns path separator for the system + //--------------------------------------------------------------------------- + static char getSeparator(); + + //--------------------------------------------------------------------------- + /// @brief Concatenate s1 and s2 + //--------------------------------------------------------------------------- + static std::string combine(const std::string &s1, const std::string &s2); + + //--------------------------------------------------------------------------- + /// @brief Get the directory name + 
//--------------------------------------------------------------------------- + static std::string getDirectoryName(const std::string &path); + + //--------------------------------------------------------------------------- + /// @brief Get absolute path + //--------------------------------------------------------------------------- + static std::string getAbsolute(const std::string &path); + + //--------------------------------------------------------------------------- + /// @brief Check if the input path is absolute path + //--------------------------------------------------------------------------- + static bool isAbsolute(const std::string &path); + + private: +}; diff --git a/SampleApp/src/PAL/include/PAL/StringOp.hpp b/SampleApp/src/PAL/include/PAL/StringOp.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8794cda9f0fac84a77d0dbb11d10a3bcce96a230 --- /dev/null +++ b/SampleApp/src/PAL/include/PAL/StringOp.hpp @@ -0,0 +1,60 @@ +//============================================================================== +// +// Copyright (c) 2018-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +//----------------------------------------------------------------------------- +/// @file +/// The file inludes APIs for string operations on supported platforms +//----------------------------------------------------------------------------- + +#pragma once + +#include + +namespace pal { +class StringOp; +} + +//------------------------------------------------------------------------------ +/// @brief +/// FileOp contains OS Specific file system functionality. 
+//------------------------------------------------------------------------------ +class pal::StringOp { + public: + //--------------------------------------------------------------------------- + /// @brief + /// Copy copy_size bytes from buffer src to buffer dst. Behaviour of the + /// function is undefined if src and dst overlap. + /// @param dst + /// Destination buffer + /// @param dst_size + /// Size of destination buffer + /// @param src + /// Source buffer + /// @param copy_size + /// Number of bytes to copy + /// @return + /// Number of bytes copied + //--------------------------------------------------------------------------- + static size_t memscpy(void *dst, size_t dstSize, const void *src, size_t copySize); + + //--------------------------------------------------------------------------- + /// @brief + /// Returns a pointer to a null-terminated byte string, which contains copies + /// of at most size bytes from the string pointed to by str. If the null + /// terminator is not encountered in the first size bytes, it is added to the + /// duplicated string. + /// @param source + /// Source string + /// @param maxlen + /// Max number of bytes to copy from str + /// @return + /// A pointer to the newly allocated string, or a null pointer if an error + /// occurred. + //--------------------------------------------------------------------------- + static char *strndup(const char *source, size_t maxlen); +}; diff --git a/SampleApp/src/PAL/src/common/GetOpt.cpp b/SampleApp/src/PAL/src/common/GetOpt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..700bcdf5549c79efa4d1cd9835f831c34df87f2c --- /dev/null +++ b/SampleApp/src/PAL/src/common/GetOpt.cpp @@ -0,0 +1,154 @@ +//============================================================================= +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================= + +#include + +#include + +#include "PAL/GetOpt.hpp" + +using namespace std; + +namespace pal { + +const char *g_optArg = nullptr; +int g_optInd = 1; + +static const struct Option *findOpt(const string str, + const struct Option *longopts, + int *longindex) { + const struct Option *opt = nullptr; + int idx = 0; + size_t searchEnd = str.find_first_of("="); + + for (opt = longopts; opt->name && strlen(opt->name) > 0; opt++, idx++) { + if (str.substr(0, searchEnd) == opt->name) { + if (longindex) { + *longindex = idx; + } + break; + } + } + // if not found, opt would point to the last element of longopts + // whose name MUST be empty + return opt->name ? opt : nullptr; +} + +int getOptLongOnly(int argc, + const char *const argv[], + const char *, + const struct Option *longopts, + int *longindex) { + const struct Option *opt; + int argLen = 0; + bool isShort = false; + const char *arg = ""; + + g_optArg = nullptr; + // no arg, means the end of command + if (g_optInd >= argc) { + return -1; + } + + arg = argv[g_optInd]; + + if (arg[0] != '-') { + g_optInd += 1; + return '?'; + } + + argLen = strlen(arg); + + if (argLen < 2) { + g_optInd += 1; + return '?'; + } + + if (!longopts) { + g_optInd += 1; + return '?'; + } + + // check short options with this form, -a arg + if (argLen == 2) { + isShort = true; + // check short options with this form, -a=arg + } else if (argLen > 3 && arg[2] == '=') { + isShort = true; + // check for long options, can be used for both forms + } else if (argLen > 2 && arg[1] != '=') { + if (arg[1] != '-') { + g_optInd += 1; + return '?'; + } + isShort = false; + } + + // start after -- to find the option + const char *const optStr = isShort ? 
&arg[1] : &arg[2]; + opt = findOpt(optStr, longopts, longindex); + if (!opt) { + g_optInd += 1; + return '?'; + } + + if (opt->hasArg == no_argument) { + g_optInd += 1; + + if (!opt->flag) { + return opt->val; + } else { + *(opt->flag) = opt->val; + return 0; + } + } + + if (opt->hasArg == required_argument) { + string optStr = argv[g_optInd]; + size_t assignIdx = optStr.find_first_of("="); + bool advance = (assignIdx == string::npos); + + // if it is --opt arg form, this will be true, + // so we need to advance one step to get arg + // otherwise, need to stop advance step & extract arg from argv[g_optInd] + if (advance) { + g_optInd += 1; + } + + if (g_optInd >= argc) { + return '?'; + } else { + // if advance, means it is the form --opt arg + // otherwise, the form, --opt=arg + if (advance) { + // since g_optInd is advanced, g_optArg can be assigned directly + g_optArg = argv[g_optInd]; + } else { + if (assignIdx == optStr.size()) { + return '?'; + } + // for not advanced form, + // g_optArg should point to the address right after "=" + g_optArg = &argv[g_optInd][assignIdx + 1]; + } + // OK, now we are ready to handle the next pair + g_optInd += 1; + + if (!opt->flag) { + return opt->val; + } else { + *(opt->flag) = opt->val; + return 0; + } + } + } + + return '?'; +} // end of getOptLongOnly + +} // namespace pal diff --git a/SampleApp/src/PAL/src/common/StringOp.cpp b/SampleApp/src/PAL/src/common/StringOp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b8cce5044438ded32c8d05108b5cb9fe4819b293 --- /dev/null +++ b/SampleApp/src/PAL/src/common/StringOp.cpp @@ -0,0 +1,63 @@ +//============================================================================== +// +// Copyright (c) 2018-2022,2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#include +#include + +#include "PAL/StringOp.hpp" + +//--------------------------------------------------------------------------- +// pal::StringOp::memscpy +//--------------------------------------------------------------------------- +size_t pal::StringOp::memscpy(void *dst, size_t dstSize, const void *src, size_t copySize) { + if (!dst || !src || !dstSize || !copySize) return 0; + + size_t minSize = dstSize < copySize ? dstSize : copySize; + + memcpy(dst, src, minSize); + + return minSize; +} + +#ifdef __hexagon__ +size_t strnlen(const char *s, size_t n) { + size_t i; + for (i = 0; i < n && s[i] != '\0'; i++) continue; + return i; +} +#endif + +//--------------------------------------------------------------------------- +// pal::StringOp::strndup +//--------------------------------------------------------------------------- +char *pal::StringOp::strndup(const char *source, size_t maxlen) { +#ifdef _WIN32 + size_t length = ::strnlen(source, maxlen); + + char *destination = (char *)malloc((length + 1) * sizeof(char)); + if (destination == nullptr) return nullptr; + + // copy length bytes to destination and leave destination[length] to be + // null terminator + strncpy_s(destination, length + 1, source, length); + + return destination; +#elif __hexagon__ + size_t length = strnlen(source, maxlen); + + char *destination = (char *)malloc((length + 1) * sizeof(char)); + if (destination == nullptr) return nullptr; + // copy length bytes to destination and leave destination[length] to be + // null terminator + strncpy(destination, source, length); + destination[length] = '\0'; + return destination; +#else + return ::strndup(source, maxlen); +#endif +} diff --git a/SampleApp/src/PAL/src/linux/Directory.cpp b/SampleApp/src/PAL/src/linux/Directory.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9f6bd2675c72a095656ab2fafb545c2aad34e1e6 --- /dev/null +++ 
b/SampleApp/src/PAL/src/linux/Directory.cpp
@@ -0,0 +1,153 @@
+//==============================================================================
+//
+// Copyright (c) 2008-2022 Qualcomm Technologies, Inc.
+// All Rights Reserved.
+// Confidential and Proprietary - Qualcomm Technologies, Inc.
+//
+//==============================================================================
+
+#include
+#include
+#include
+#ifndef __QNXNTO__
+#include
+#endif
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "PAL/Directory.hpp"
+#include "PAL/FileOp.hpp"
+#include "PAL/Path.hpp"
+
+//------------------------------------------------------------------------------
+//------------------------------------------------------------------------------
+#ifdef __QNXNTO__
+static bool is_qnx_dir(const struct dirent *ep) {
+  struct dirent_extra *exp;
+  bool is_dir = false;
+
+  for (exp = _DEXTRA_FIRST(ep); _DEXTRA_VALID(exp, ep); exp = _DEXTRA_NEXT(exp)) {
+    if (exp->d_type == _DTYPE_STAT || exp->d_type == _DTYPE_LSTAT) {
+      struct stat *statbuff = &((dirent_extra_stat *)exp)->d_stat;
+      if (statbuff && S_ISDIR(statbuff->st_mode)) {
+        is_dir = true;
+        break;
+      }
+    }
+  }
+  return is_dir;
+}
+#endif
+
+// ------------------------------------------------------------------------------
+// pal::Directory::create
+// ------------------------------------------------------------------------------
+bool pal::Directory::create(const std::string &path, pal::Directory::DirMode dirmode) {
+  struct stat st;
+  int status = 0;
+  if (stat(path.c_str(), &st) != 0) {
+    // Directory does not exist
+    status = mkdir(path.c_str(), static_cast<mode_t>(dirmode));
+  } else if (!S_ISDIR(st.st_mode)) {
+    errno = ENOTDIR;
+    status = -1;
+  }
+  return (status == 0);
+}
+
+//------------------------------------------------------------------------------
+//------------------------------------------------------------------------------
+bool pal::Directory::remove(const std::string
&dirName) { + DIR *dir; + struct dirent *entry; + + dir = opendir(dirName.c_str()); + if (dir == nullptr) { + // If the directory doesn't exist then just return true. + if (errno == ENOENT) { + return true; + } + return false; + } + +#ifdef __QNXNTO__ + if (dircntl(dir, D_SETFLAG, D_FLAG_STAT) == -1) { + return false; + } +#endif + + // Recursively traverse the directory tree. + while ((entry = readdir(dir)) != nullptr) { + if (strcmp(entry->d_name, ".") && strcmp(entry->d_name, "..")) { + std::stringstream ss; + ss << dirName << Path::getSeparator() << entry->d_name; + std::string path = ss.str(); +#ifdef __QNXNTO__ + if (is_qnx_dir(entry)) +#else + if (entry->d_type == DT_DIR) +#endif + { + // It's a directory so we need to drill down into it and delete + // its contents. + if (!remove(path)) { + return false; + } + } else { + if (::remove(path.c_str())) { + return false; + } + } + } + } + + closedir(dir); + + if (::remove(dirName.c_str())) { + return false; + } + + return true; +} + +bool pal::Directory::makePath(const std::string &path) { + struct stat st; + bool rc = false; + + if (path == ".") { + rc = true; + } else if (stat(path.c_str(), &st) == 0) { + if (st.st_mode & S_IFDIR) { + rc = true; + } + } else { + size_t offset = path.find_last_of(Path::getSeparator()); + if (offset != std::string::npos) { + std::string newPath = path.substr(0, offset); + if (!makePath(newPath)) { + return false; + } + } + + // There is a possible race condition, where a file/directory can be + // created in between the stat() above, and the mkdir() call here. + // So, ignore the return code from the mkdir() call, and then re-check + // for existence of the directory after it. Ensure both that it exists + // and that it is a directory - just like above. 
+ mkdir(path.c_str(), 0777); + + if ((stat(path.c_str(), &st) == 0) && (st.st_mode & S_IFDIR)) { + rc = true; + } + } + + return rc; +} diff --git a/SampleApp/src/PAL/src/linux/DynamicLoading.cpp b/SampleApp/src/PAL/src/linux/DynamicLoading.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3c4085f0cdd9c72677564989989268014c7c25df --- /dev/null +++ b/SampleApp/src/PAL/src/linux/DynamicLoading.cpp @@ -0,0 +1,75 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#include +#include + +#include "PAL/Debug.hpp" +#include "PAL/DynamicLoading.hpp" + +void *pal::dynamicloading::dlOpen(const char *filename, int flags) { + int realFlags = 0; + + if (flags & DL_NOW) { + realFlags |= RTLD_NOW; + } + + if (flags & DL_LOCAL) { + realFlags |= RTLD_LOCAL; + } + + if (flags & DL_GLOBAL) { + realFlags |= RTLD_GLOBAL; + } + + return ::dlopen(filename, realFlags); +} + +void *pal::dynamicloading::dlSym(void *handle, const char *symbol) { + if (handle == DL_DEFAULT) { + return ::dlsym(RTLD_DEFAULT, symbol); + } + + return ::dlsym(handle, symbol); +} + +int pal::dynamicloading::dlAddrToLibName(void *addr, std::string &name) { + // Clean the output buffer + name = std::string(); + + // If the address is empty, return zero as treating failure + if (!addr) { + DEBUG_MSG("Input address is nullptr."); + return 0; + } + + // Dl_info do not maintain the lifetime of its string members, + // it would be maintained by dlopen() and dlclose(), + // so we do not need to release it manually + Dl_info info; + int result = ::dladdr(addr, &info); + + // If dladdr() successes, set name to the library name + if (result) { + name = std::string(info.dli_fname); + } else { + DEBUG_MSG("Input address could not be matched 
to a shared object."); + } + + return result; +} + +int pal::dynamicloading::dlClose(void *handle) { + if (!handle) { + return 0; + } + + return ::dlclose(handle); +} + +char *pal::dynamicloading::dlError(void) { return ::dlerror(); } diff --git a/SampleApp/src/PAL/src/linux/FileOp.cpp b/SampleApp/src/PAL/src/linux/FileOp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..908fe470a1f0bab84f90124a598798eee58561cd --- /dev/null +++ b/SampleApp/src/PAL/src/linux/FileOp.cpp @@ -0,0 +1,356 @@ +//============================================================================== +// +// Copyright (c) 2008-2013,2015,2019-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#include +#include +#include +#ifndef __QNXNTO__ +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "PAL/Debug.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" + +typedef struct stat Stat_t; + +//--------------------------------------------------------------------------- +// pal::FileOp::HasFileExtension +//--------------------------------------------------------------------------- +bool pal::FileOp::checkFileExists(const std::string& fileName) { + Stat_t sb; + + if (stat(fileName.c_str(), &sb) == -1) { + return false; + } else { + return true; + } +} + +//--------------------------------------------------------------------------- +// pal::FileOp::move +//--------------------------------------------------------------------------- +bool pal::FileOp::move(const std::string& currentName, const std::string& newName, bool overwrite) { + if (overwrite) { + remove(newName.c_str()); + } + return (rename(currentName.c_str(), newName.c_str()) == 0); +} + 
+//--------------------------------------------------------------------------- +// pal::FileOp::deleteFile +//--------------------------------------------------------------------------- +bool pal::FileOp::deleteFile(const std::string& fileName) { + return (remove(fileName.c_str()) == 0); +} + +//------------------------------------------------------------------------------ +// pal::FileOp::checkIsDir +//------------------------------------------------------------------------------ +bool pal::FileOp::checkIsDir(const std::string& fileName) { + bool retVal = false; + Stat_t sb; + if (stat(fileName.c_str(), &sb) == 0) { + if (sb.st_mode & S_IFDIR) { + retVal = true; + } + } + return retVal; +} + +//------------------------------------------------------------------------------ +// pal::FileOp::getFileInfo +//------------------------------------------------------------------------------ +bool pal::FileOp::getFileInfo(const std::string& filename, + pal::FileOp::FilenamePartsType_t& filenameParts) { + std::string name; + + // Clear the result + filenameParts.basename.clear(); + filenameParts.extension.clear(); + filenameParts.directory.clear(); + + size_t lastPathSeparator = filename.find_last_of(Path::getSeparator()); + if (lastPathSeparator == std::string::npos) { + // No directory + name = filename; + } else { + // has a directory part + filenameParts.directory = filename.substr(0, lastPathSeparator); + name = filename.substr(lastPathSeparator + 1); + } + + size_t ext = name.find_last_of("."); + if (ext == std::string::npos) { + // no extension + filenameParts.basename = name; + } else { + // has extension + filenameParts.basename = name.substr(0, ext); + filenameParts.extension = name.substr(ext + 1); + } + + return true; +} + +//--------------------------------------------------------------------------- +// pal::FileOp::copyOverFile +//--------------------------------------------------------------------------- +bool pal::FileOp::copyOverFile(const std::string& 
fromFile, const std::string& toFile) { + bool rc = false; + int readFd; + int writeFd; + struct stat statBuf; + + // Open the input file. + readFd = ::open(fromFile.c_str(), O_RDONLY); + if (readFd == -1) { + close(readFd); + return false; + } + + // Stat the input file to obtain its size. */ + if (fstat(readFd, &statBuf) != 0) { + close(readFd); + return false; + } + + // Open the output file for writing, with the same permissions as the input + writeFd = ::open(toFile.c_str(), O_WRONLY | O_CREAT | O_TRUNC, statBuf.st_mode); + if (writeFd == -1) { + close(readFd); + return false; + } + + // Copy the file in a non-kernel specific way */ + char fileBuf[8192]; + ssize_t rBytes, wBytes; + while (true) { + rBytes = read(readFd, fileBuf, sizeof(fileBuf)); + + if (!rBytes) { + rc = true; + break; + } + + if (rBytes < 0) { + rc = false; + break; + } + + wBytes = write(writeFd, fileBuf, (size_t)rBytes); + + if (!wBytes) { + rc = true; + break; + } + + if (wBytes < 0) { + rc = false; + break; + } + } + + /* Close up. */ + close(readFd); + close(writeFd); + return rc; +} + +static bool getFileInfoListRecursiveImpl(const std::string& path, + pal::FileOp::FilenamePartsListType_t& filenamePartsList, + const bool ignoreDirs, + size_t maxDepth) { + struct dirent** namelist = nullptr; + int entryCount = 0; + + // Base case + if (maxDepth == 0) { + return true; + } + +#ifdef __ANDROID__ + // android dirent.h has the wrong signature for alphasort so it had to be disabled or fixed + entryCount = scandir(path.c_str(), &namelist, 0, 0); +#else + entryCount = scandir(path.c_str(), &namelist, 0, alphasort); +#endif + if (entryCount < 0) { + return false; + } else { + while (entryCount--) { + const std::string dName(namelist[entryCount]->d_name); + free(namelist[entryCount]); + + // skip current directory, prev directory and empty string + if (dName.empty() || dName == "." 
|| dName == "..") {
+        continue;
+      }
+
+      std::string curPath = path;
+      curPath += pal::Path::getSeparator();
+      curPath += dName;
+
+      // recurse if directory but avoid symbolic links to directories
+      if (pal::FileOp::checkIsDir(curPath)) {
+        Stat_t sb;
+        if (lstat(curPath.c_str(), &sb) == 0 && S_ISDIR(sb.st_mode)) {
+          if (!getFileInfoListRecursiveImpl(curPath, filenamePartsList, ignoreDirs, maxDepth - 1)) {
+            return false;
+          }
+        }
+
+        if (ignoreDirs) {
+          continue;
+        }
+
+        // Append trailing / to make this path look like a directory for
+        // getFileInfo()
+        if (curPath.back() != pal::Path::getSeparator()) {
+          curPath += pal::Path::getSeparator();
+        }
+      }
+
+      // add to vector
+      pal::FileOp::FilenamePartsType_t filenameParts;
+      if (pal::FileOp::getFileInfo(curPath, filenameParts)) {
+        filenamePartsList.push_back(filenameParts);
+      }
+    }
+
+    free(namelist);
+  }
+
+  return true;
+}
+
+//---------------------------------------------------------------------------
+// pal::FileOp::getFileInfoList
+//---------------------------------------------------------------------------
+bool pal::FileOp::getFileInfoList(const std::string& path,
+                                  FilenamePartsListType_t& filenamePartsList) {
+  return getFileInfoListRecursiveImpl(path, filenamePartsList, false, 1);
+}
+
+//---------------------------------------------------------------------------
+// pal::FileOp::getFileInfoListRecursive
+//---------------------------------------------------------------------------
+bool pal::FileOp::getFileInfoListRecursive(const std::string& path,
+                                           FilenamePartsListType_t& filenamePartsList,
+                                           const bool ignoreDirs) {
+  return getFileInfoListRecursiveImpl(
+      path, filenamePartsList, ignoreDirs, std::numeric_limits<size_t>::max());
+}
+
+//---------------------------------------------------------------------------
+// pal::FileOp::getAbsolutePath
+//---------------------------------------------------------------------------
+std::string pal::FileOp::getAbsolutePath(const std::string& path) {
+  // NOTE: This
implementation is broken currently when a path with + // non-existant components is passed! NEO-19723 was created to address. + char absPath[PATH_MAX + 1] = {0}; + + if (realpath(path.c_str(), absPath) == NULL) { + DEBUG_MSG("GetAbsolute path fail! Error code : %d", errno); + return std::string(); + } + return std::string(absPath); +} + +//--------------------------------------------------------------------------- +// pal::FileOp::setCWD +//--------------------------------------------------------------------------- +bool pal::FileOp::setCurrentWorkingDirectory(const std::string& workingDir) { + return chdir(workingDir.c_str()) == 0; +} + +//--------------------------------------------------------------------------- +// pal::FileOp::getDirectory +//--------------------------------------------------------------------------- +std::string pal::FileOp::getDirectory(const std::string& file) { + std::string rc = file; + size_t offset = file.find_last_of(Path::getSeparator()); + if (offset != std::string::npos) { + rc = file.substr(0, offset); + } + return rc; +} + +//--------------------------------------------------------------------------- +// pal::FileOp::getFileName +//--------------------------------------------------------------------------- +std::string pal::FileOp::getFileName(const std::string& file) { + std::string rc = file; + size_t offset = file.find_last_of(Path::getSeparator()); + if (offset != std::string::npos) { + rc = file.substr(offset + 1); // +1 to skip path separator + } + return rc; +} + +//--------------------------------------------------------------------------- +// pal::FileOp::hasFileExtension +//--------------------------------------------------------------------------- +bool pal::FileOp::hasFileExtension(const std::string& file) { + FilenamePartsType_t parts; + getFileInfo(file, parts); + + return !parts.extension.empty(); +} + +//--------------------------------------------------------------------------- +// pal::FileOp::getCWD 
+//--------------------------------------------------------------------------- +std::string pal::FileOp::getCurrentWorkingDirectory() { + char buffer[PATH_MAX + 1]; + buffer[0] = '\0'; + + // If there is any failure return empty string. It is technically possible + // to handle paths exceeding PATH_MAX on some flavors of *nix but platforms + // like Android (Bionic) do no provide such capability. For consistency we + // will not handle extra long path names. + if (nullptr == getcwd(buffer, PATH_MAX)) { + return std::string(); + } else { + return std::string(buffer); + } +} + +//--------------------------------------------------------------------------- +// pal::FileOp::partsToString +//--------------------------------------------------------------------------- +std::string pal::FileOp::partsToString(const FilenamePartsType_t& filenameParts) { + std::string path; + + if (!filenameParts.directory.empty()) { + path += filenameParts.directory; + path += Path::getSeparator(); + } + if (!filenameParts.basename.empty()) { + path += filenameParts.basename; + } + if (!filenameParts.extension.empty()) { + path += "."; + path += filenameParts.extension; + } + return path; +} diff --git a/SampleApp/src/PAL/src/linux/Path.cpp b/SampleApp/src/PAL/src/linux/Path.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1a46b7a6b2c4df4b132ebd5cbb82707b9d4b739f --- /dev/null +++ b/SampleApp/src/PAL/src/linux/Path.cpp @@ -0,0 +1,48 @@ +//============================================================================== +// +// Copyright (c) 2008-2014, 2015, 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#include + +#include +#ifndef PATH_MAX +#include +#endif + +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" + +char pal::Path::getSeparator() { return '/'; } + +std::string pal::Path::combine(const std::string &s1, const std::string &s2) { + std::stringstream ss; + ss << s1; + if (s1.size() > 0 && s1[s1.size() - 1] != getSeparator()) { + ss << getSeparator(); + } + ss << s2; + return ss.str(); +} + +std::string pal::Path::getDirectoryName(const std::string &path) { + std::string rc = path; + size_t index = path.find_last_of(pal::Path::getSeparator()); + if (index != std::string::npos) { + rc = path.substr(0, index); + } + return rc; +} + +std::string pal::Path::getAbsolute(const std::string &path) { + // Functionality was duplicated of function in FileOp + // Just call that function directly instead + return pal::FileOp::getAbsolutePath(path); +} + +bool pal::Path::isAbsolute(const std::string &path) { + return path.size() > 0 && path[0] == getSeparator(); +} diff --git a/SampleApp/src/PAL/src/windows/Common.cpp b/SampleApp/src/PAL/src/windows/Common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..27cf17bbfe36f3ab6adc546c0cd84ef0d9380934 --- /dev/null +++ b/SampleApp/src/PAL/src/windows/Common.cpp @@ -0,0 +1,46 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "Common.hpp" +#include "PAL/Debug.hpp" + +int32_t pal::scanDir(const std::string &path, std::vector &namelist) { + // example : "C:/Users/guest" scan nothing, "C:/Users/guest/*" can scan the + // entire directory instead + std::string scanPath = path + "/*"; + WIN32_FIND_DATAA findFileData; + HANDLE hFind = FindFirstFileA(scanPath.c_str(), &findFileData); + if (hFind == INVALID_HANDLE_VALUE) { + DEBUG_MSG("scanDir fail! Error code : %d", GetLastError()); + return -1; + } + + do { + // will compare char until '\0' to allow filename with first char = '.' + if (strncmp(findFileData.cFileName, ".", 2) == 0 || + strncmp(findFileData.cFileName, "..", 3) == 0) { + continue; + } + namelist.push_back(findFileData); + } while (FindNextFileA(hFind, &findFileData)); + FindClose(hFind); + + return namelist.size(); +} + +void pal::normalizeSeparator(std::string &path) { replace(path.begin(), path.end(), '\\', '/'); } diff --git a/SampleApp/src/PAL/src/windows/Common.hpp b/SampleApp/src/PAL/src/windows/Common.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3f1d5780673e155d226e2ce863f9dcee92ca742c --- /dev/null +++ b/SampleApp/src/PAL/src/windows/Common.hpp @@ -0,0 +1,37 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#pragma once + +#include +#include + +#include +#include + +namespace pal { +/** + * @brief + * Scans elements in a directory. + * @param path + * Path in string which we are going to scan. + * @param namelist + * Data struct for each element, which will be stored as WIN32_FIND_DATAA. 
+ * @return + * Number of elements in this path, return -1 if fail. + */ +int32_t scanDir(const std::string &path, std::vector &namelist); + +/** + * @brief + * Replace all the '\\' in path with '/' to keep consistency. + * @param path + * The string which you want to format. + */ +void normalizeSeparator(std::string &path); +} // namespace pal diff --git a/SampleApp/src/PAL/src/windows/Directory.cpp b/SampleApp/src/PAL/src/windows/Directory.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fb0b5335e73ec91cc2ffbfa5d34e7c8a5e016993 --- /dev/null +++ b/SampleApp/src/PAL/src/windows/Directory.cpp @@ -0,0 +1,105 @@ +//===================================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//===================================================================================== + +#include +#include +#include + +#include +#include + +#include "Common.hpp" +#include "PAL/Debug.hpp" +#include "PAL/Directory.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" + +//-------------------------------------------------------------------------------------- +// pal::Directory::Create +//-------------------------------------------------------------------------------------- +bool pal::Directory::create(const std::string &path, pal::Directory::DirMode dirmode) { + struct stat st; + // it create a directory successfully or directory exists already, return true. + if ((stat(path.c_str(), &st) != 0 && (CreateDirectoryA(path.c_str(), NULL) != 0)) || + ((st.st_mode & S_IFDIR) != 0)) { + return true; + } else { + DEBUG_MSG("Create Folder fail! 
Error code : %d", GetLastError()); + } + return false; +} + +//-------------------------------------------------------------------------------------- +// pal::Directory::Remove +//-------------------------------------------------------------------------------------- +bool pal::Directory::remove(const std::string &dirName) { + struct stat st; + if (stat(dirName.c_str(), &st) == 0) { + if ((st.st_mode & S_IFDIR) != 0) { + // a directory exist and remove it ! + std::string fullPath = dirName; + if (pal::Path::isAbsolute(dirName) == 0) { + fullPath = pal::Path::getAbsolute(dirName); + } + // Note This string MUST be double-null terminated. + fullPath = fullPath + '\0' + '\0'; + SHFILEOPSTRUCTA fileOp = { + NULL, // hwnd + FO_DELETE, // wFunc, delete usage + fullPath.c_str(), // pFrom, delete target folder + "", // pTo, delete operation can ignore this + FOF_NO_UI, // Perform operation silently, presenting no UI to user + false, // fAnyOperationsAborted, + 0, // hNameMappings + "" // lpszProgressTitle, used only if for FOF_SIMPLEPROGRESS + }; + if (SHFileOperationA(&fileOp) == 0) { + return true; + } else { + DEBUG_MSG("Delete folder fail! Error code : %d", GetLastError()); + } + } + } else { + // If the directory doesn't exist then just, return true. Behaves like Linux + if (errno == ENOENT) { + return true; + } else { + DEBUG_MSG("Remove stat fail! 
Error code : %d", errno); + } + } + return false; +} + +//-------------------------------------------------------------------------------------- +// pal::Directory::MakePath +//-------------------------------------------------------------------------------------- +bool pal::Directory::makePath(const std::string &path) { + struct stat st; + bool rc = false; + if (path == ".") { + rc = true; + } else if (stat(path.c_str(), &st) == 0) { + if ((st.st_mode & S_IFDIR) != 0) { + // if a directory path is already exist + rc = true; + } + } else { + size_t offset = std::min(path.find_last_of('/'), path.find_last_of('\\')); + if (offset != std::string::npos) { + std::string newPath = path.substr(0, offset); + if (!makePath(newPath)) { + return false; + } + } + pal::Directory::create(path.c_str()); + if ((stat(path.c_str(), &st) == 0) && ((st.st_mode & S_IFDIR) != 0)) { + rc = true; + } + } + return rc; +} \ No newline at end of file diff --git a/SampleApp/src/PAL/src/windows/DynamicLoading.cpp b/SampleApp/src/PAL/src/windows/DynamicLoading.cpp new file mode 100644 index 0000000000000000000000000000000000000000..73dd609468644d14a1759846440fed957d57b3b4 --- /dev/null +++ b/SampleApp/src/PAL/src/windows/DynamicLoading.cpp @@ -0,0 +1,220 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +// clang-format off +#include +#include +#include +#include +#include +// clang-format on + +#include +#include + +#include "PAL/Debug.hpp" +#include "PAL/DynamicLoading.hpp" + +#define STRINGIFY(x) #x +#define TOSTRING(x) STRINGIFY(x) + +static std::set mod_handles; +static thread_local char *sg_lastErrMsg = ""; + +void *pal::dynamicloading::dlOpen(const char *filename, int flags) { + HMODULE mod; + HANDLE cur_proc; + DWORD as_is, to_be; + bool loadedBefore = false; + + if (!filename || ::strlen(filename) == 0) { + // TODO: we don't support empty filename now + sg_lastErrMsg = "filename is null or empty"; + return NULL; + } + + // POSIX asks one of symbol resolving approaches: + // NOW or LAZY must be specified + if (!(flags & DL_NOW)) { + // TODO: since Windows does not provide existing API so lazy + // symbol resolving needs to do relocation by ourself + // that would be too costly. SNPE didn't use this feature now + // , wait until we really need it. 
keep the flexibility here + // ask caller MUST pass DL_NOW + sg_lastErrMsg = "flags must include DL_NOW"; + return NULL; + } + + cur_proc = GetCurrentProcess(); + + if (EnumProcessModules(cur_proc, NULL, 0, &as_is) == 0) { + sg_lastErrMsg = "enumerate modules failed before loading module"; + return NULL; + } + + // search from system lib path first + mod = LoadLibraryExA(filename, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + if (!mod) { + sg_lastErrMsg = "load library failed"; + return NULL; + } + + if (EnumProcessModules(cur_proc, NULL, 0, &to_be) == 0) { + sg_lastErrMsg = "enumerate modules failed after loading module"; + FreeLibrary(mod); + return NULL; + } + + if (as_is == to_be) { + loadedBefore = true; + } + + // (not loadedBefore) and DL_LOCAL means this lib was not loaded yet + // add it into the local set + // + // If loadedBefore and DL_LOCAL, means this lib was already loaded + // 2 cases here for how it was loaded before: + // a. with DL_LOCAL, just ignore since it was already in local set + // b. 
with DL_GLOBAL, POSIX asks it in global, ignore it, too + if ((!loadedBefore) && (flags & DL_LOCAL)) { + mod_handles.insert(mod); + } + + // once callers ask for global, needs to be in global thereafter + // so the lib should be removed from local set + if (flags & DL_GLOBAL) { + mod_handles.erase(mod); + } + + return static_cast(mod); +} + +void *pal::dynamicloading::dlSym(void *handle, const char *symbol) { + FARPROC sym_addr = NULL; + HANDLE cur_proc; + DWORD size, size_needed; + HMODULE *mod_list; + HMODULE mod = 0; + + if ((!handle) || (!symbol)) { + return NULL; + } + + cur_proc = GetCurrentProcess(); + + if (EnumProcessModules(cur_proc, NULL, 0, &size) == 0) { + sg_lastErrMsg = "enumerate modules failed before memory allocation"; + return NULL; + } + + mod_list = static_cast(malloc(size)); + if (!mod_list) { + sg_lastErrMsg = "malloc failed"; + return NULL; + } + + if (EnumProcessModules(cur_proc, mod_list, size, &size_needed) == 0) { + sg_lastErrMsg = "enumerate modules failed after memory allocation"; + free(mod_list); + return NULL; + } + + // DL_DEFAULT needs to bypass those modules with DL_LOCAL flag + if (handle == DL_DEFAULT) { + for (size_t i = 0; i < (size / sizeof(HMODULE)); i++) { + auto iter = mod_handles.find(mod_list[i]); + if (iter != mod_handles.end()) { + continue; + } + // once find the first non-local module with symbol + // return its address here to avoid unnecessary looping + sym_addr = GetProcAddress(mod_list[i], symbol); + if (sym_addr) { + free(mod_list); + return *(void **)(&sym_addr); + } + } + } else { + mod = static_cast(handle); + } + + free(mod_list); + sym_addr = GetProcAddress(mod, symbol); + if (!sym_addr) { + sg_lastErrMsg = "can't resolve symbol"; + return NULL; + } + + return *(void **)(&sym_addr); +} + +int pal::dynamicloading::dlAddrToLibName(void *addr, std::string &name) { + // Clean the output buffer + name = std::string(); + + // If the address is empty, return zero as treating failure + if (!addr) { + 
DEBUG_MSG("Input address is nullptr."); + return 0; + } + + HMODULE hModule = NULL; + // TODO: Need to use TCHAR for the compatibility of ASCII and Unicode + CHAR nameBuf[MAX_PATH]; + + // (1st flag) The lpModuleName parameter is an address in the module + // (2nd flag) The reference count for the module is not incremented + DWORD flags = + GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT; + + // Retrieves a module handle for the specified module by its symbol address + if (!GetModuleHandleExA(flags, reinterpret_cast(addr), &hModule) || hModule == NULL) { + DEBUG_MSG("Failed to get module handle. Error code: %d", GetLastError()); + return 0; + } + + // Retrieves the fully qualified path for the file that contains the specified module + DWORD dwSize = GetModuleFileNameA(hModule, nameBuf, sizeof(nameBuf)); + + // dwSize == 0 indicates function failure + // If the path is too long (greater than MAX_PATH), treat it as failure + if (dwSize == 0 || ERROR_INSUFFICIENT_BUFFER == GetLastError()) { + DEBUG_MSG("Failed to get module file name. 
Error code: %d", GetLastError()); + return 0; + } + + name = std::string(nameBuf); + + // Return a non-zero value to represent the function successes + return 1; +} + +int pal::dynamicloading::dlClose(void *handle) { + if (!handle) { + return 0; + } + + HMODULE mod = static_cast(handle); + + if (FreeLibrary(mod) == 0) { + sg_lastErrMsg = "free library failed"; + return -1; + } + + mod_handles.erase(mod); + + return 0; +} + +char *pal::dynamicloading::dlError(void) { + char *retStr = sg_lastErrMsg; + + sg_lastErrMsg = ""; + + return retStr; +} diff --git a/SampleApp/src/PAL/src/windows/FileOp.cpp b/SampleApp/src/PAL/src/windows/FileOp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..62ddbe56a9cb3853f35ade5477e7b3f58588f17c --- /dev/null +++ b/SampleApp/src/PAL/src/windows/FileOp.cpp @@ -0,0 +1,297 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "Common.hpp" +#include "PAL/Debug.hpp" +#include "PAL/Directory.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" + +//------------------------------------------------------------------------------- +// pal::FileOp::checkFileExists +//------------------------------------------------------------------------------- +bool pal::FileOp::checkFileExists(const std::string &fileName) { + struct stat st; + if (stat(fileName.c_str(), &st) != 0) { + DEBUG_MSG("Check File fail! 
Error code : %d", errno); + return false; + } + return true; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::copyOverFile +//------------------------------------------------------------------------------- +bool pal::FileOp::copyOverFile(const std::string &fromFile, const std::string &toFile) { + if (CopyFileA(fromFile.c_str(), toFile.c_str(), 0) == 0) { + DEBUG_MSG("Copy file fail! Error code : %d", GetLastError()); + return false; + } + return true; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::move +//------------------------------------------------------------------------------- +bool pal::FileOp::move(const std::string ¤tName, const std::string &newName, bool overwrite) { + struct stat st; + // if currentName doesn't exist, return false in case newName got deleted + if (stat(currentName.c_str(), &st) != 0) { + DEBUG_MSG("CurrentName check status fail! Error code : %d", errno); + return false; + } + if (stat(newName.c_str(), &st) == 0) { + if ((st.st_mode & S_IFDIR) != 0) { + // if newName is directory and overwrite = false, cannot move, return false + // if newName is directory and overwrite = true, delete it and rename + if (overwrite == false) { + return false; + } + pal::Directory::remove(newName); + } else { + deleteFile(newName); + } + } + // in windows, if newName exist already, rename will return -1 + // only when newName doesn't exist, rename will return 0 + return (rename(currentName.c_str(), newName.c_str()) == 0); +} + +//------------------------------------------------------------------------------- +// pal::FileOp::deleteFile +//------------------------------------------------------------------------------- +bool pal::FileOp::deleteFile(const std::string &fileName) { + return (DeleteFileA(fileName.c_str()) != 0); +} + +//------------------------------------------------------------------------------- +// pal::FileOp::checkIsDir 
+//------------------------------------------------------------------------------- +bool pal::FileOp::checkIsDir(const std::string &fileName) { + DWORD result = GetFileAttributesA(fileName.c_str()); + if (result == static_cast(FILE_INVALID_FILE_ID)) { + DEBUG_MSG("File attribute is invalid_file_id!"); + return false; + } + return (result & FILE_ATTRIBUTE_DIRECTORY) != 0; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::getFileInfo +//------------------------------------------------------------------------------- +bool pal::FileOp::getFileInfo(const std::string &filename, + pal::FileOp::FilenamePartsType_t &filenameParts) { + std::string name; + int32_t lastPathSeparator = std::max(static_cast(filename.find_last_of('\\')), + static_cast(filename.find_last_of('/'))); + if (lastPathSeparator == static_cast(std::string::npos)) { + // No directory + name = filename; + } else { + // has a directory part + filenameParts.directory = filename.substr(0, lastPathSeparator); + name = filename.substr(lastPathSeparator + 1); + } + + size_t ext = name.find_last_of("."); + if (ext == std::string::npos) { + // no extension + filenameParts.basename = name; + } else { + // has extension + filenameParts.basename = name.substr(0, ext); + filenameParts.extension = name.substr(ext + 1); + } + pal::normalizeSeparator(filenameParts.directory); + return true; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::getFileInfoListRecursiveImpl +//------------------------------------------------------------------------------- +static bool getFileInfoListRecursiveImpl(const std::string &path, + pal::FileOp::FilenamePartsListType_t &filenamePartsList, + const bool ignoreDirs, + size_t maxDepth) { + // base case + if (maxDepth == 0) { + return true; + } + if (pal::FileOp::checkIsDir(path) == false) { + return false; + } + int32_t entryCount = 0; + std::vector nameList; + entryCount = 
pal::scanDir(path.c_str(), nameList); + if (entryCount < 0) { + return false; + } + while (entryCount--) { + const std::string dName = std::string(nameList[entryCount].cFileName); + // skip current directory, previous directory and empty string + if (dName.empty() || dName == "." || dName == "..") { + continue; + } + std::string curPath = path + pal::Path::getSeparator() + dName; + // recursive if directory but avoid symbolic links to directories + if (pal::FileOp::checkIsDir(curPath)) { + struct stat st; + if (stat(curPath.c_str(), &st) == 0 && ((st.st_mode & S_IFDIR) != 0) && + (!getFileInfoListRecursiveImpl(curPath, filenamePartsList, ignoreDirs, maxDepth - 1))) { + return false; + } + if (curPath.back() != pal::Path::getSeparator()) { + curPath += pal::Path::getSeparator(); + } + // continue here to prevent this object from adding filenameparts in + // vector but we still need this directory to go recursive + if (ignoreDirs) { + continue; + } + } + // add to vector + pal::FileOp::FilenamePartsType_t filenameParts = {std::string(), std::string(), std::string()}; + if (pal::FileOp::getFileInfo(curPath, filenameParts)) { + filenamePartsList.push_back(filenameParts); + } + } + return true; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::getFileInfoList +//------------------------------------------------------------------------------- +bool pal::FileOp::getFileInfoList(const std::string &path, + FilenamePartsListType_t &filenamePartsList) { + return getFileInfoListRecursiveImpl(path, filenamePartsList, false, 1); +} + +//------------------------------------------------------------------------------- +// pal::FileOp::getFileInfoListRecursive +//------------------------------------------------------------------------------- +bool pal::FileOp::getFileInfoListRecursive(const std::string &path, + FilenamePartsListType_t &filenamePartsList, + const bool ignoreDirs) { + return getFileInfoListRecursiveImpl(path, 
filenamePartsList, ignoreDirs, UINT_MAX); +} + +//------------------------------------------------------------------------------- +// pal::FileOp::getAbsolutePath +//------------------------------------------------------------------------------- +std::string pal::FileOp::getAbsolutePath(const std::string &path) { + char fullPath[MAX_PATH]; + if (_fullpath(fullPath, path.c_str(), MAX_PATH) == NULL) { + DEBUG_MSG("GetAbsolute path fail! Error code : %d", errno); + return std::string(); + } + std::string reStr = std::string(fullPath); + pal::normalizeSeparator(reStr); + return reStr; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::getDirectory +//------------------------------------------------------------------------------- +std::string pal::FileOp::getDirectory(const std::string &file) { + std::string rc = file; + int32_t index = std::max(static_cast(file.find_last_of('\\')), + static_cast(file.find_last_of('/'))); + if (index != static_cast(std::string::npos)) { + rc = file.substr(0, index); + } + pal::normalizeSeparator(rc); + return rc; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::GetFileName +//------------------------------------------------------------------------------- +std::string pal::FileOp::getFileName(const std::string &file) { + std::string rc = file; + int32_t index = std::max(static_cast(file.find_last_of('\\')), + static_cast(file.find_last_of('/'))); + if (index != static_cast(std::string::npos)) { + rc = file.substr(index + 1); // +1 to skip path separator + } + return rc; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::hasFileExtension +//------------------------------------------------------------------------------- +bool pal::FileOp::hasFileExtension(const std::string &file) { + FilenamePartsType_t parts = {std::string(), std::string(), std::string()}; + getFileInfo(file, 
parts); + return !parts.extension.empty(); +} + +//------------------------------------------------------------------------------- +// pal::FileOp::getCurrentWorkingDirectory +//------------------------------------------------------------------------------- +std::string pal::FileOp::getCurrentWorkingDirectory() { + char buffer[MAX_PATH + 1]; + buffer[0] = '\0'; + + // If there is any failure return empty string. It is technically possible + // to handle paths exceeding PATH_MAX on some flavors of *nix but platforms + // like Android (Bionic) do no provide such capability. For consistency we + // will not handle extra long path names. + if (0 == GetCurrentDirectoryA(MAX_PATH, buffer)) { + DEBUG_MSG("Get current working directory fail! Error code : %d", GetLastError()); + return std::string(); + } + std::string res = std::string(buffer); + pal::normalizeSeparator(res); + return res; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::setCurrentWorkingDirectory +//------------------------------------------------------------------------------- +bool pal::FileOp::setCurrentWorkingDirectory(const std::string &workingDir) { + return _chdir(workingDir.c_str()) == 0; +} + +//------------------------------------------------------------------------------- +// pal::FileOp::PartsToString +//------------------------------------------------------------------------------- +std::string pal::FileOp::partsToString(const FilenamePartsType_t &filenameParts) { + std::string path; + + if (!filenameParts.directory.empty()) { + path += filenameParts.directory; + path += Path::getSeparator(); + } + if (!filenameParts.basename.empty()) { + path += filenameParts.basename; + } + if (!filenameParts.extension.empty()) { + path += "."; + path += filenameParts.extension; + } + pal::normalizeSeparator(path); + return path; +} \ No newline at end of file diff --git a/SampleApp/src/PAL/src/windows/Path.cpp b/SampleApp/src/PAL/src/windows/Path.cpp 
new file mode 100644 index 0000000000000000000000000000000000000000..073f97e8ef7d8d878b0bb14f12bc28c49f0c82d1 --- /dev/null +++ b/SampleApp/src/PAL/src/windows/Path.cpp @@ -0,0 +1,72 @@ +//============================================================================== +// +// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#include +#include + +#include +#include +#include + +#include "Common.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" + +//------------------------------------------------------------------------------ +// PAL::Path::GetSeparator +//------------------------------------------------------------------------------ +char pal::Path::getSeparator() { return '/'; } + +//------------------------------------------------------------------------------ +// pal::Path::Combine +//------------------------------------------------------------------------------ +std::string pal::Path::combine(const std::string &s1, const std::string &s2) { + std::stringstream ss; + ss << s1; + if (s1.size() > 0 && ((s1[s1.size() - 1] != '/') && (s1[s1.size() - 1] != '\\'))) { + ss << getSeparator(); + } + ss << s2; + return ss.str(); +} + +//------------------------------------------------------------------------------ +// pal::Path::getDirectoryName +//------------------------------------------------------------------------------ +std::string pal::Path::getDirectoryName(const std::string &path) { + std::string rc = path; + int32_t index = std::max(static_cast(path.find_last_of('\\')), + static_cast(path.find_last_of('/'))); + if (index != static_cast(std::string::npos)) { + rc = path.substr(0, index); + } + pal::normalizeSeparator(rc); + return rc; +} + +//------------------------------------------------------------------------------ +// pal::Path::getAbsolute 
+//------------------------------------------------------------------------------ +std::string pal::Path::getAbsolute(const std::string &path) { + std::string res = pal::FileOp::getAbsolutePath(path); + pal::normalizeSeparator(res); + return res; +} + +//------------------------------------------------------------------------------ +// PAL::Path::isAbsolute +// requirement : shlwapi.lib +//------------------------------------------------------------------------------ +bool pal::Path::isAbsolute(const std::string &path) { + std::string windowsPath = path; + // in windows, when we need to check relative or absolute path, + // separator MUST be '\\' rather than '/' + // for more information : https://docs.microsoft.com/en-us/dotnet/standard/io/file-path-formats + replace(windowsPath.begin(), windowsPath.end(), '/', '\\'); + return PathIsRelativeA(windowsPath.c_str()) == false; +} diff --git a/SampleApp/src/QnnSampleApp.cpp b/SampleApp/src/QnnSampleApp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..263bc2ac8c7a8d29767f94f62104acfce9101f49 --- /dev/null +++ b/SampleApp/src/QnnSampleApp.cpp @@ -0,0 +1,668 @@ +//============================================================================== +// +// Copyright (c) 2019-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#include + +#include +#include +#include + +#include "DataUtil.hpp" +#include "Logger.hpp" +#ifndef __hexagon__ +#include "PAL/Directory.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" +#endif +#include "PAL/StringOp.hpp" +#include "QnnSampleApp.hpp" +#include "QnnSampleAppUtils.hpp" +#include "QnnWrapperUtils.hpp" + +using namespace qnn; +using namespace qnn::tools; + +// Default path where the outputs will be stored if outputPath is +// not supplied. 
// Default output location used when the caller passes an empty outputPath.
const std::string sample_app::QnnSampleApp::s_defaultOutputPath = "./output/";

// Constructor: stores configuration, splits the comma-separated input-list and
// op-package strings, and falls back to s_defaultOutputPath when outputPath is
// empty. No QNN API calls are made here.
sample_app::QnnSampleApp::QnnSampleApp(QnnFunctionPointers qnnFunctionPointers,
                                       std::string inputListPaths,
                                       std::string opPackagePaths,
                                       void* backendLibraryHandle,
                                       std::string outputPath,
                                       bool debug,
                                       iotensor::OutputDataType outputDataType,
                                       iotensor::InputDataType inputDataType,
                                       sample_app::ProfilingLevel profilingLevel,
                                       bool dumpOutputs,
                                       std::string cachedBinaryPath,
                                       std::string saveBinaryName)
    : m_qnnFunctionPointers(qnnFunctionPointers),
      m_outputPath(outputPath),
      m_saveBinaryName(saveBinaryName),
      m_cachedBinaryPath(cachedBinaryPath),
      m_debug(debug),
      m_outputDataType(outputDataType),
      m_inputDataType(inputDataType),
      m_profilingLevel(profilingLevel),
      m_dumpOutputs(dumpOutputs),
      m_backendLibraryHandle(backendLibraryHandle),
      m_isBackendInitialized(false),
      m_isContextCreated(false) {
  split(m_inputListPaths, inputListPaths, ',');
  split(m_opPackagePaths, opPackagePaths, ',');
  if (m_outputPath.empty()) {
    m_outputPath = s_defaultOutputPath;
  }
  return;
}

// Destructor: releases QNN resources in reverse order of creation --
// profile handle, context, backend, then backend logging.
sample_app::QnnSampleApp::~QnnSampleApp() {
  // Free Profiling object if it was created
  if (nullptr != m_profileBackendHandle) {
    QNN_DEBUG("Freeing backend profile object.");
    if (QNN_PROFILE_NO_ERROR !=
        m_qnnFunctionPointers.qnnInterface.profileFree(m_profileBackendHandle)) {
      QNN_ERROR("Could not free backend profile handle.");
    }
  }
  // Free context if not already done
  if (m_isContextCreated) {
    QNN_DEBUG("Freeing context");
    if (QNN_CONTEXT_NO_ERROR !=
        m_qnnFunctionPointers.qnnInterface.contextFree(m_context, nullptr)) {
      QNN_ERROR("Could not free context");
    }
  }
  m_isContextCreated = false;
  // Terminate backend
  if (m_isBackendInitialized && nullptr != m_qnnFunctionPointers.qnnInterface.backendFree) {
    QNN_DEBUG("Freeing backend");
    if (QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendFree(m_backendHandle)) {
      QNN_ERROR("Could not free backend");
    }
  }
  m_isBackendInitialized = false;
  // Terminate logging in the backend
  if (nullptr != m_qnnFunctionPointers.qnnInterface.logFree && nullptr != m_logHandle) {
    if (QNN_SUCCESS != m_qnnFunctionPointers.qnnInterface.logFree(m_logHandle)) {
      QNN_WARN("Unable to terminate logging in the backend.");
    }
  }
  return;
}

// Queries the backend's build-id string; returns "" when the backend reports
// none (or the call fails and leaves the pointer null).
std::string sample_app::QnnSampleApp::getBackendBuildId() {
  char* backendBuildId{nullptr};
  if (QNN_SUCCESS !=
      m_qnnFunctionPointers.qnnInterface.backendGetBuildId((const char**)&backendBuildId)) {
    QNN_ERROR("Unable to get build Id from the backend.");
  }
  return (backendBuildId == nullptr ? std::string("") : std::string(backendBuildId));
}

// Initialize QnnSampleApp. Things it does:
// 1. Create output directory
// 2. Read all input list paths provided
// during creation.
sample_app::StatusCode sample_app::QnnSampleApp::initialize() {
  // Create Output Directory (skipped on Hexagon where PAL is unavailable).
  // Note: exitWithMessage terminates the process on failure.
#ifndef __hexagon__
  if (m_dumpOutputs && !::pal::FileOp::checkFileExists(m_outputPath) &&
      !pal::Directory::makePath(m_outputPath)) {
    exitWithMessage("Could not create output directory: " + m_outputPath, EXIT_FAILURE);
  }
#endif
  // Read Input File List
  bool readSuccess;
  std::tie(m_inputFileLists, m_inputNameToIndex, readSuccess) = readInputLists(m_inputListPaths);
  if (!readSuccess) {
    exitWithMessage("Could not read input lists", EXIT_FAILURE);
  }
  // initialize logging in the backend, mirroring the app-side logger settings
  if (log::isLogInitialized()) {
    auto logCallback = log::getLogCallback();
    auto logLevel    = log::getLogLevel();
    QNN_INFO("Initializing logging in the backend. Callback: [%p], Log Level: [%d]",
             logCallback,
             logLevel);
    if (QNN_SUCCESS !=
        m_qnnFunctionPointers.qnnInterface.logCreate(logCallback, logLevel, &m_logHandle)) {
      QNN_WARN("Unable to initialize logging in the backend.");
    }
  } else {
    QNN_WARN("Logging not available in the backend.");
  }
  return StatusCode::SUCCESS;
}

// Creates a backend profile handle when profiling is enabled; BASIC and
// DETAILED levels map to the corresponding QNN_PROFILE_LEVEL_* constants.
sample_app::StatusCode sample_app::QnnSampleApp::initializeProfiling() {
  if (ProfilingLevel::OFF != m_profilingLevel) {
    QNN_INFO("Profiling turned on; level = %d", m_profilingLevel);
    if (ProfilingLevel::BASIC == m_profilingLevel) {
      QNN_INFO("Basic profiling requested. Creating Qnn Profile object.");
      if (QNN_PROFILE_NO_ERROR !=
          m_qnnFunctionPointers.qnnInterface.profileCreate(
              m_backendHandle, QNN_PROFILE_LEVEL_BASIC, &m_profileBackendHandle)) {
        QNN_WARN("Unable to create profile handle in the backend.");
        return StatusCode::FAILURE;
      }
    } else if (ProfilingLevel::DETAILED == m_profilingLevel) {
      QNN_INFO("Detailed profiling requested. Creating Qnn Profile object.");
      if (QNN_PROFILE_NO_ERROR !=
          m_qnnFunctionPointers.qnnInterface.profileCreate(
              m_backendHandle, QNN_PROFILE_LEVEL_DETAILED, &m_profileBackendHandle)) {
        QNN_ERROR("Unable to create profile handle in the backend.");
        return StatusCode::FAILURE;
      }
    }
  }
  return StatusCode::SUCCESS;
}

// Simple method to report error from app to lib.
int32_t sample_app::QnnSampleApp::reportError(const std::string& err) {
  QNN_ERROR("%s", err.c_str());
  return EXIT_FAILURE;
}

// Initialize a QnnBackend.
+sample_app::StatusCode sample_app::QnnSampleApp::initializeBackend() { + auto qnnStatus = m_qnnFunctionPointers.qnnInterface.backendCreate( + m_logHandle, (const QnnBackend_Config_t**)m_backendConfig, &m_backendHandle); + if (QNN_BACKEND_NO_ERROR != qnnStatus) { + QNN_ERROR("Could not initialize backend due to error = %d", qnnStatus); + return StatusCode::FAILURE; + } + QNN_INFO("Initialize Backend Returned Status = %d", qnnStatus); + m_isBackendInitialized = true; + return StatusCode::SUCCESS; +} + +// Terminate the backend after done. +sample_app::StatusCode sample_app::QnnSampleApp::terminateBackend() { + if ((m_isBackendInitialized && nullptr != m_qnnFunctionPointers.qnnInterface.backendFree) && + QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendFree(m_backendHandle)) { + QNN_ERROR("Could not terminate backend"); + return StatusCode::FAILURE; + } + m_isBackendInitialized = false; + return StatusCode::SUCCESS; +} + +// Register op packages and interface providers supplied during +// object creation. If there are multiple op packages, register +// them sequentially in the order provided. 
+sample_app::StatusCode sample_app::QnnSampleApp::registerOpPackages() { + const size_t pathIdx = 0; + const size_t interfaceProviderIdx = 1; + for (auto const& opPackagePath : m_opPackagePaths) { + std::vector opPackage; + split(opPackage, opPackagePath, ':'); + QNN_DEBUG("opPackagePath: %s", opPackagePath.c_str()); + const char* target = nullptr; + const size_t targetIdx = 2; + if (opPackage.size() != 2 && opPackage.size() != 3) { + QNN_ERROR("Malformed opPackageString provided: %s", opPackagePath.c_str()); + return StatusCode::FAILURE; + } + if (opPackage.size() == 3) { + target = (char*)opPackage[targetIdx].c_str(); + } + if (nullptr == m_qnnFunctionPointers.qnnInterface.backendRegisterOpPackage) { + QNN_ERROR("backendRegisterOpPackageFnHandle is nullptr."); + return StatusCode::FAILURE; + } + if (QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendRegisterOpPackage( + m_backendHandle, + (char*)opPackage[pathIdx].c_str(), + (char*)opPackage[interfaceProviderIdx].c_str(), + target)) { + QNN_ERROR("Could not register Op Package: %s and interface provider: %s", + opPackage[pathIdx].c_str(), + opPackage[interfaceProviderIdx].c_str()); + return StatusCode::FAILURE; + } + QNN_INFO("Registered Op Package: %s and interface provider: %s", + opPackage[pathIdx].c_str(), + opPackage[interfaceProviderIdx].c_str()); + } + return StatusCode::SUCCESS; +} + +// Create a Context in a backend. +sample_app::StatusCode sample_app::QnnSampleApp::createContext() { + if (QNN_CONTEXT_NO_ERROR != + m_qnnFunctionPointers.qnnInterface.contextCreate(m_backendHandle, + m_deviceHandle, + (const QnnContext_Config_t**)m_contextConfig, + &m_context)) { + QNN_ERROR("Could not create context"); + return StatusCode::FAILURE; + } + m_isContextCreated = true; + return StatusCode::SUCCESS; +} + +// Free context after done. 
// Frees the context, attributing any teardown profiling to the backend
// profile handle; clears m_isContextCreated so the destructor skips it.
sample_app::StatusCode sample_app::QnnSampleApp::freeContext() {
  if (QNN_CONTEXT_NO_ERROR !=
      m_qnnFunctionPointers.qnnInterface.contextFree(m_context, m_profileBackendHandle)) {
    QNN_ERROR("Could not free context");
    return StatusCode::FAILURE;
  }
  m_isContextCreated = false;
  return StatusCode::SUCCESS;
}

// Calls composeGraph function in QNN's model.so.
// composeGraphs is supposed to populate graph related
// information in m_graphsInfo and m_graphsCount.
// m_debug is the option supplied to composeGraphs to
// say that all intermediate tensors including output tensors
// are expected to be read by the app.
sample_app::StatusCode sample_app::QnnSampleApp::composeGraphs() {
  auto returnStatus = StatusCode::SUCCESS;
  if (qnn_wrapper_api::ModelError_t::MODEL_NO_ERROR !=
      m_qnnFunctionPointers.composeGraphsFnHandle(
          m_backendHandle,
          m_qnnFunctionPointers.qnnInterface,
          m_context,
          (const qnn_wrapper_api::GraphConfigInfo_t**)m_graphConfigsInfo,
          m_graphConfigsInfoCount,
          &m_graphsInfo,
          &m_graphsCount,
          m_debug,
          log::getLogCallback(),
          log::getLogLevel())) {
    QNN_ERROR("Failed in composeGraphs()");
    returnStatus = StatusCode::FAILURE;
  }
  return returnStatus;
}

// Finalizes every composed graph; then dumps profiling (if enabled) and, when
// a save name was supplied, serializes the context via saveBinary().
sample_app::StatusCode sample_app::QnnSampleApp::finalizeGraphs() {
  for (size_t graphIdx = 0; graphIdx < m_graphsCount; graphIdx++) {
    if (QNN_GRAPH_NO_ERROR !=
        m_qnnFunctionPointers.qnnInterface.graphFinalize(
            (*m_graphsInfo)[graphIdx].graph, m_profileBackendHandle, nullptr)) {
      return StatusCode::FAILURE;
    }
  }
  if (ProfilingLevel::OFF != m_profilingLevel) {
    extractBackendProfilingInfo(m_profileBackendHandle);
  }
  auto returnStatus = StatusCode::SUCCESS;
  if (!m_saveBinaryName.empty()) {
    QNN_INFO("Before saveBinary(): saving context and metadata.");
    returnStatus = saveBinary();
  } else {
    QNN_DEBUG("m_saveBinaryName is empty()");
  }
  return returnStatus;
}

// Deserializes a cached context binary: reads the file, extracts graph
// metadata through the QNN system interface, recreates the context from the
// blob, then retrieves a graph handle per graph. Uses an accumulate-status
// style so the system-context handle is always freed.
sample_app::StatusCode sample_app::QnnSampleApp::createFromBinary() {
  if (m_cachedBinaryPath.empty()) {
    QNN_ERROR("No name provided to read binary file from.");
    return StatusCode::FAILURE;
  }
  if (nullptr == m_qnnFunctionPointers.qnnSystemInterface.systemContextCreate ||
      nullptr == m_qnnFunctionPointers.qnnSystemInterface.systemContextGetBinaryInfo ||
      nullptr == m_qnnFunctionPointers.qnnSystemInterface.systemContextFree) {
    QNN_ERROR("QNN System function pointers are not populated.");
    return StatusCode::FAILURE;
  }
  uint64_t bufferSize{0};
  // NOTE(review): the template argument of shared_ptr (likely <uint8_t>) was
  // lost in the patch extraction -- confirm against the original file.
  std::shared_ptr buffer{nullptr};
  // read serialized binary into a byte buffer
  tools::datautil::StatusCode status{tools::datautil::StatusCode::SUCCESS};
  std::tie(status, bufferSize) = tools::datautil::getFileSize(m_cachedBinaryPath);
  if (0 == bufferSize) {
    QNN_ERROR("Received path to an empty file. Nothing to deserialize.");
    return StatusCode::FAILURE;
  }
  // NOTE(review): template arguments lost here as well (shared_ptr<uint8_t>
  // with default_delete<uint8_t[]> for the array form).
  buffer = std::shared_ptr(new uint8_t[bufferSize], std::default_delete());
  if (!buffer) {
    QNN_ERROR("Failed to allocate memory.");
    return StatusCode::FAILURE;
  }

  status = tools::datautil::readBinaryFromFile(
      m_cachedBinaryPath, reinterpret_cast(buffer.get()), bufferSize);
  if (status != tools::datautil::StatusCode::SUCCESS) {
    QNN_ERROR("Failed to read binary data.");
    return StatusCode::FAILURE;
  }

  // inspect binary info
  auto returnStatus = StatusCode::SUCCESS;
  QnnSystemContext_Handle_t sysCtxHandle{nullptr};
  if (QNN_SUCCESS != m_qnnFunctionPointers.qnnSystemInterface.systemContextCreate(&sysCtxHandle)) {
    QNN_ERROR("Could not create system handle.");
    returnStatus = StatusCode::FAILURE;
  }
  const QnnSystemContext_BinaryInfo_t* binaryInfo{nullptr};
  Qnn_ContextBinarySize_t binaryInfoSize{0};
  if (StatusCode::SUCCESS == returnStatus &&
      QNN_SUCCESS != m_qnnFunctionPointers.qnnSystemInterface.systemContextGetBinaryInfo(
                         sysCtxHandle,
                         static_cast(buffer.get()),
                         bufferSize,
                         &binaryInfo,
                         &binaryInfoSize)) {
    QNN_ERROR("Failed to get context binary info");
    returnStatus = StatusCode::FAILURE;
  }

  // fill GraphInfo_t based on binary info
  if (StatusCode::SUCCESS == returnStatus &&
      !copyMetadataToGraphsInfo(binaryInfo, m_graphsInfo, m_graphsCount)) {
    QNN_ERROR("Failed to copy metadata.");
    returnStatus = StatusCode::FAILURE;
  }
  // Free the system-context handle unconditionally before proceeding.
  m_qnnFunctionPointers.qnnSystemInterface.systemContextFree(sysCtxHandle);
  sysCtxHandle = nullptr;

  if (StatusCode::SUCCESS == returnStatus &&
      nullptr == m_qnnFunctionPointers.qnnInterface.contextCreateFromBinary) {
    QNN_ERROR("contextCreateFromBinaryFnHandle is nullptr.");
    returnStatus = StatusCode::FAILURE;
  }
  // Non-zero return from contextCreateFromBinary is treated as failure.
  if (StatusCode::SUCCESS == returnStatus &&
      m_qnnFunctionPointers.qnnInterface.contextCreateFromBinary(
          m_backendHandle,
          m_deviceHandle,
          (const QnnContext_Config_t**)m_contextConfig,
          static_cast(buffer.get()),
          bufferSize,
          &m_context,
          m_profileBackendHandle)) {
    QNN_ERROR("Could not create context from binary.");
    returnStatus = StatusCode::FAILURE;
  }
  if (ProfilingLevel::OFF != m_profilingLevel) {
    extractBackendProfilingInfo(m_profileBackendHandle);
  }
  // NOTE(review): m_isContextCreated is set even on the failure paths above --
  // looks intentional-but-fragile; confirm destructor behavior is desired.
  m_isContextCreated = true;
  if (StatusCode::SUCCESS == returnStatus) {
    for (size_t graphIdx = 0; graphIdx < m_graphsCount; graphIdx++) {
      if (nullptr == m_qnnFunctionPointers.qnnInterface.graphRetrieve) {
        QNN_ERROR("graphRetrieveFnHandle is nullptr.");
        returnStatus = StatusCode::FAILURE;
        break;
      }
      if (QNN_SUCCESS !=
          m_qnnFunctionPointers.qnnInterface.graphRetrieve(
              m_context, (*m_graphsInfo)[graphIdx].graphName, &((*m_graphsInfo)[graphIdx].graph))) {
        QNN_ERROR("Unable to retrieve graph handle for graph Idx: %d", graphIdx);
        returnStatus = StatusCode::FAILURE;
      }
    }
  }
  if (StatusCode::SUCCESS != returnStatus) {
    QNN_DEBUG("Cleaning up graph Info structures.");
    qnn_wrapper_api::freeGraphsInfo(&m_graphsInfo, m_graphsCount);
  }
  return returnStatus;
}

// Serializes the current context to <outputPath>/<saveBinaryName>.bin:
// queries the required size, fetches the blob, validates the written size,
// then writes it out (file I/O skipped on Hexagon).
sample_app::StatusCode sample_app::QnnSampleApp::saveBinary() {
  if (m_saveBinaryName.empty()) {
    QNN_ERROR("No name provided to save binary file.");
    return StatusCode::FAILURE;
  }
  if (nullptr == m_qnnFunctionPointers.qnnInterface.contextGetBinarySize ||
      nullptr == m_qnnFunctionPointers.qnnInterface.contextGetBinary) {
    QNN_ERROR("contextGetBinarySizeFnHandle or contextGetBinaryFnHandle is nullptr.");
    return StatusCode::FAILURE;
  }
  uint64_t requiredBufferSize{0};
  if (QNN_CONTEXT_NO_ERROR !=
      m_qnnFunctionPointers.qnnInterface.contextGetBinarySize(m_context, &requiredBufferSize)) {
    QNN_ERROR("Could not get the required binary size.");
    return StatusCode::FAILURE;
  }
  // NOTE(review): unique_ptr template argument (likely <uint8_t[]>) lost in
  // patch extraction.
  std::unique_ptr saveBuffer(new uint8_t[requiredBufferSize]);
  if (nullptr == saveBuffer) {
    QNN_ERROR("Could not allocate buffer to save binary.");
    return StatusCode::FAILURE;
  }
  uint64_t writtenBufferSize{0};
  if (QNN_CONTEXT_NO_ERROR !=
      m_qnnFunctionPointers.qnnInterface.contextGetBinary(m_context,
                                                          reinterpret_cast(saveBuffer.get()),
                                                          requiredBufferSize,
                                                          &writtenBufferSize)) {
    QNN_ERROR("Could not get binary.");
    return StatusCode::FAILURE;
  }
  if (requiredBufferSize < writtenBufferSize) {
    // NOTE(review): %d is used for 64-bit sizes here; should likely be
    // %" PRIu64 " as in extractProfilingEvent -- confirm.
    QNN_ERROR(
        "Illegal written buffer size [%d] bytes. Cannot exceed allocated memory of [%d] bytes",
        writtenBufferSize,
        requiredBufferSize);
    return StatusCode::FAILURE;
  }
#ifndef __hexagon__
  auto dataUtilStatus = tools::datautil::writeBinaryToFile(
      m_outputPath, m_saveBinaryName + ".bin", (uint8_t*)saveBuffer.get(), writtenBufferSize);
  if (tools::datautil::StatusCode::SUCCESS != dataUtilStatus) {
    QNN_ERROR("Error while writing binary to file.");
    return StatusCode::FAILURE;
  }
#endif
  return StatusCode::SUCCESS;
}

// Walks all top-level profiling events on the given handle, logging each
// event and (recursively) its sub-events.
sample_app::StatusCode sample_app::QnnSampleApp::extractBackendProfilingInfo(
    Qnn_ProfileHandle_t profileHandle) {
  if (nullptr == m_profileBackendHandle) {
    QNN_ERROR("Backend Profile handle is nullptr; may not be initialized.");
    return StatusCode::FAILURE;
  }
  const QnnProfile_EventId_t* profileEvents{nullptr};
  uint32_t numEvents{0};
  if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileGetEvents(
                                  profileHandle, &profileEvents, &numEvents)) {
    QNN_ERROR("Failure in profile get events.");
    return StatusCode::FAILURE;
  }
  QNN_DEBUG("ProfileEvents: [%p], numEvents: [%d]", profileEvents, numEvents);
  for (size_t event = 0; event < numEvents; event++) {
    extractProfilingEvent(*(profileEvents + event));
    extractProfilingSubEvents(*(profileEvents + event));
  }
  return StatusCode::SUCCESS;
}

// Recursively logs the sub-event tree beneath one profiling event.
sample_app::StatusCode sample_app::QnnSampleApp::extractProfilingSubEvents(
    QnnProfile_EventId_t profileEventId) {
  const QnnProfile_EventId_t* profileSubEvents{nullptr};
  uint32_t numSubEvents{0};
  if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileGetSubEvents(
                                  profileEventId, &profileSubEvents, &numSubEvents)) {
    QNN_ERROR("Failure in profile get sub events.");
    return StatusCode::FAILURE;
  }
  QNN_DEBUG("ProfileSubEvents: [%p], numSubEvents: [%d]", profileSubEvents, numSubEvents);
  for (size_t subEvent = 0; subEvent < numSubEvents; subEvent++) {
    extractProfilingEvent(*(profileSubEvents + subEvent));
    extractProfilingSubEvents(*(profileSubEvents + subEvent));
  }
  return StatusCode::SUCCESS;
}

// Fetches and logs the data (type, value, identifier, unit) of one event.
sample_app::StatusCode sample_app::QnnSampleApp::extractProfilingEvent(
    QnnProfile_EventId_t profileEventId) {
  QnnProfile_EventData_t eventData;
  if (QNN_PROFILE_NO_ERROR !=
      m_qnnFunctionPointers.qnnInterface.profileGetEventData(profileEventId, &eventData)) {
    QNN_ERROR("Failure in profile get event type.");
    return StatusCode::FAILURE;
  }
  QNN_DEBUG("Printing Event Info - Event Type: [%d], Event Value: [%" PRIu64
            "], Event Identifier: [%s], Event Unit: [%d]",
            eventData.type,
            eventData.value,
            eventData.identifier,
            eventData.unit);
  return StatusCode::SUCCESS;
}

// Maps a QNN common error code onto the app's StatusCode taxonomy; anything
// unrecognized collapses to generic FAILURE.
sample_app::StatusCode sample_app::QnnSampleApp::verifyFailReturnStatus(Qnn_ErrorHandle_t errCode) {
  auto returnStatus = sample_app::StatusCode::FAILURE;
  switch (errCode) {
    case QNN_COMMON_ERROR_SYSTEM_COMMUNICATION:
      returnStatus = sample_app::StatusCode::FAILURE_SYSTEM_COMMUNICATION_ERROR;
      break;
    case QNN_COMMON_ERROR_SYSTEM:
      returnStatus = sample_app::StatusCode::FAILURE_SYSTEM_ERROR;
      break;
    case QNN_COMMON_ERROR_NOT_SUPPORTED:
      returnStatus = sample_app::StatusCode::QNN_FEATURE_UNSUPPORTED;
      break;
    default:
      break;
  }
  return returnStatus;
}

// Checks whether the backend knows the DEVICE property group. "Not supported"
// is only a warning; an unknown key is a hard failure.
sample_app::StatusCode sample_app::QnnSampleApp::isDevicePropertySupported() {
  if (nullptr != m_qnnFunctionPointers.qnnInterface.propertyHasCapability) {
    auto qnnStatus =
        m_qnnFunctionPointers.qnnInterface.propertyHasCapability(QNN_PROPERTY_GROUP_DEVICE);
    if (QNN_PROPERTY_NOT_SUPPORTED == qnnStatus) {
      QNN_WARN("Device property is not supported");
    }
    if (QNN_PROPERTY_ERROR_UNKNOWN_KEY == qnnStatus) {
      QNN_ERROR("Device property is not known to backend");
      return StatusCode::FAILURE;
    }
  }
  return StatusCode::SUCCESS;
}

// Creates a device handle when the backend exposes deviceCreate;
// "unsupported feature" is tolerated (backend simply has no device concept).
sample_app::StatusCode sample_app::QnnSampleApp::createDevice() {
  if (nullptr != m_qnnFunctionPointers.qnnInterface.deviceCreate) {
    auto qnnStatus =
        m_qnnFunctionPointers.qnnInterface.deviceCreate(m_logHandle, nullptr, &m_deviceHandle);
    if (QNN_SUCCESS != qnnStatus && QNN_DEVICE_ERROR_UNSUPPORTED_FEATURE != qnnStatus) {
      QNN_ERROR("Failed to create device");
      return verifyFailReturnStatus(qnnStatus);
    }
  }
  return StatusCode::SUCCESS;
}

// Counterpart of createDevice(); same tolerance for unsupported backends.
sample_app::StatusCode sample_app::QnnSampleApp::freeDevice() {
  if (nullptr != m_qnnFunctionPointers.qnnInterface.deviceFree) {
    auto qnnStatus = m_qnnFunctionPointers.qnnInterface.deviceFree(m_deviceHandle);
    if (QNN_SUCCESS != qnnStatus && QNN_DEVICE_ERROR_UNSUPPORTED_FEATURE != qnnStatus) {
      QNN_ERROR("Failed to free device");
      return verifyFailReturnStatus(qnnStatus);
    }
  }
  return StatusCode::SUCCESS;
}

// executeGraphs() that is currently used by qnn-sample-app's main.cpp.
// This function runs all the graphs present in model.so by reading
// inputs from input_list based files and writes output to .raw files.
sample_app::StatusCode sample_app::QnnSampleApp::executeGraphs() {
  auto returnStatus = StatusCode::SUCCESS;
  for (size_t graphIdx = 0; graphIdx < m_graphsCount; graphIdx++) {
    QNN_DEBUG("Starting execution for graphIdx: %d", graphIdx);
    if (graphIdx >= m_inputFileLists.size()) {
      QNN_ERROR("No Inputs available for: %d", graphIdx);
      returnStatus = StatusCode::FAILURE;
      break;
    }
    Qnn_Tensor_t* inputs  = nullptr;
    Qnn_Tensor_t* outputs = nullptr;
    if (iotensor::StatusCode::SUCCESS !=
        m_ioTensor.setupInputAndOutputTensors(&inputs, &outputs, (*m_graphsInfo)[graphIdx])) {
      QNN_ERROR("Error in setting up Input and output Tensors for graphIdx: %d", graphIdx);
      returnStatus = StatusCode::FAILURE;
      break;
    }
    auto inputFileList = m_inputFileLists[graphIdx];
    auto graphInfo     = (*m_graphsInfo)[graphIdx];
    if (!inputFileList.empty()) {
      // Batched loop: each populateInputTensors call consumes
      // numInputFilesPopulated entries from the list.
      size_t totalCount           = inputFileList[0].size();
      size_t inputFileIndexOffset = 0;
      while (inputFileIndexOffset < totalCount) {
        iotensor::StatusCode iotReturnStatus;
        size_t numInputFilesPopulated;
        size_t batchSize;
        std::tie(iotReturnStatus, numInputFilesPopulated, batchSize) =
            m_ioTensor.populateInputTensors(graphIdx,
                                            inputFileList,
                                            inputFileIndexOffset,
                                            false,
                                            m_inputNameToIndex[graphIdx],
                                            inputs,
                                            graphInfo,
                                            m_inputDataType);
        if (iotensor::StatusCode::SUCCESS != iotReturnStatus) {
          returnStatus = StatusCode::FAILURE;
        }
        if (StatusCode::SUCCESS == returnStatus) {
          QNN_DEBUG("Successfully populated input tensors for graphIdx: %d", graphIdx);
          Qnn_ErrorHandle_t executeStatus = QNN_GRAPH_NO_ERROR;
          executeStatus =
              m_qnnFunctionPointers.qnnInterface.graphExecute(graphInfo.graph,
                                                              inputs,
                                                              graphInfo.numInputTensors,
                                                              outputs,
                                                              graphInfo.numOutputTensors,
                                                              m_profileBackendHandle,
                                                              nullptr);
          if (QNN_GRAPH_NO_ERROR != executeStatus) {
            returnStatus = StatusCode::FAILURE;
          }
          if (StatusCode::SUCCESS == returnStatus) {
            QNN_DEBUG("Successfully executed graphIdx: %d ", graphIdx);
#ifndef __hexagon__
            if (iotensor::StatusCode::SUCCESS !=
                m_ioTensor.writeOutputTensors(graphIdx,
                                              inputFileIndexOffset,
                                              graphInfo.graphName,
                                              outputs,
                                              graphInfo.numOutputTensors,
                                              m_outputDataType,
                                              m_graphsCount,
                                              m_outputPath,
                                              numInputFilesPopulated,
                                              batchSize)) {
              returnStatus = StatusCode::FAILURE;
            }
#endif
          }
          inputFileIndexOffset += numInputFilesPopulated;
        }
        if (StatusCode::SUCCESS != returnStatus) {
          QNN_ERROR("Execution of Graph: %d failed!", graphIdx);
          break;
        }
      }
    }
    // Tensors are torn down per graph, regardless of success/failure.
    m_ioTensor.tearDownInputAndOutputTensors(
        inputs, outputs, graphInfo.numInputTensors, graphInfo.numOutputTensors);
    inputs  = nullptr;
    outputs = nullptr;
    if (StatusCode::SUCCESS != returnStatus) {
      break;
    }
  }

  qnn_wrapper_api::freeGraphsInfo(&m_graphsInfo, m_graphsCount);
  m_graphsInfo = nullptr;
  return returnStatus;
}
diff --git a/SampleApp/src/QnnSampleApp.hpp b/SampleApp/src/QnnSampleApp.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..530d1c153cfddc6bebb1f30e9ee976d116aded05
--- /dev/null
+++
b/SampleApp/src/QnnSampleApp.hpp
@@ -0,0 +1,128 @@
//==============================================================================
//
//  Copyright (c) 2020-2023 Qualcomm Technologies, Inc.
//  All Rights Reserved.
//  Confidential and Proprietary - Qualcomm Technologies, Inc.
//
//==============================================================================
#pragma once

// NOTE(review): the two system include targets below were lost in the patch
// extraction (likely <memory> and <queue>/<vector>) -- confirm.
#include
#include

#include "IOTensor.hpp"
#include "SampleApp.hpp"

namespace qnn {
namespace tools {
namespace sample_app {

// Result codes returned by every QnnSampleApp operation.
enum class StatusCode {
  SUCCESS,
  FAILURE,
  FAILURE_INPUT_LIST_EXHAUSTED,
  FAILURE_SYSTEM_ERROR,
  FAILURE_SYSTEM_COMMUNICATION_ERROR,
  QNN_FEATURE_UNSUPPORTED
};

// Driver object that owns the full QNN lifecycle for the sample app:
// backend/device/context creation, graph compose/finalize/execute,
// context-binary save/restore, and profiling extraction.
class QnnSampleApp {
 public:
  QnnSampleApp(QnnFunctionPointers qnnFunctionPointers,
               std::string inputListPaths,
               std::string opPackagePaths,
               void *backendHandle,
               std::string outputPath                      = s_defaultOutputPath,
               bool debug                                  = false,
               iotensor::OutputDataType outputDataType     = iotensor::OutputDataType::FLOAT_ONLY,
               iotensor::InputDataType inputDataType       = iotensor::InputDataType::FLOAT,
               ProfilingLevel profilingLevel               = ProfilingLevel::OFF,
               bool dumpOutputs                            = false,
               std::string cachedBinaryPath                = "",
               std::string saveBinaryName                  = "");

  // @brief Print a message to STDERR then return a nonzero
  //  exit status.
  int32_t reportError(const std::string &err);

  StatusCode initialize();

  StatusCode initializeBackend();

  StatusCode createContext();

  StatusCode composeGraphs();

  StatusCode finalizeGraphs();

  StatusCode executeGraphs();

  StatusCode registerOpPackages();

  StatusCode createFromBinary();

  StatusCode saveBinary();

  StatusCode freeContext();

  StatusCode terminateBackend();

  StatusCode freeGraphs();

  Qnn_ContextHandle_t getContext();

  StatusCode initializeProfiling();

  std::string getBackendBuildId();

  StatusCode isDevicePropertySupported();

  StatusCode createDevice();

  StatusCode freeDevice();

  StatusCode verifyFailReturnStatus(Qnn_ErrorHandle_t errCode);

  virtual ~QnnSampleApp();

 private:
  StatusCode extractBackendProfilingInfo(Qnn_ProfileHandle_t profileHandle);

  StatusCode extractProfilingSubEvents(QnnProfile_EventId_t profileEventId);

  StatusCode extractProfilingEvent(QnnProfile_EventId_t profileEventId);

  static const std::string s_defaultOutputPath;

  // NOTE(review): the container template arguments in the members below were
  // lost in the patch extraction (m_inputListPaths etc. are presumably
  // vectors of std::string) -- confirm against the original header.
  QnnFunctionPointers m_qnnFunctionPointers;
  std::vector m_inputListPaths;
  std::vector>> m_inputFileLists;
  std::vector> m_inputNameToIndex;
  std::vector m_opPackagePaths;
  std::string m_outputPath;
  std::string m_saveBinaryName;
  std::string m_cachedBinaryPath;
  QnnBackend_Config_t **m_backendConfig = nullptr;
  Qnn_ContextHandle_t m_context         = nullptr;
  QnnContext_Config_t **m_contextConfig = nullptr;
  bool m_debug;
  iotensor::OutputDataType m_outputDataType;
  iotensor::InputDataType m_inputDataType;
  ProfilingLevel m_profilingLevel;
  bool m_dumpOutputs;
  qnn_wrapper_api::GraphInfo_t **m_graphsInfo;
  uint32_t m_graphsCount;
  void *m_backendLibraryHandle;
  iotensor::IOTensor m_ioTensor;
  bool m_isBackendInitialized;
  bool m_isContextCreated;
  Qnn_ProfileHandle_t m_profileBackendHandle              = nullptr;
  qnn_wrapper_api::GraphConfigInfo_t **m_graphConfigsInfo = nullptr;
  uint32_t m_graphConfigsInfoCount;
  Qnn_LogHandle_t m_logHandle         = nullptr;
  Qnn_BackendHandle_t m_backendHandle = nullptr;
  Qnn_DeviceHandle_t m_deviceHandle   = nullptr;
};
}  // namespace sample_app
}  // namespace tools
}  // namespace qnn
diff --git a/SampleApp/src/QnnTypeMacros.hpp b/SampleApp/src/QnnTypeMacros.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fc134cf516357fee8abcd167683a5fdc34d9d1ff
--- /dev/null
+++ b/SampleApp/src/QnnTypeMacros.hpp
@@ -0,0 +1,668 @@
//==============================================================================
//
//  Copyright (c) 2021-2024 Qualcomm Technologies, Inc.
//  All Rights Reserved.
//  Confidential and Proprietary - Qualcomm Technologies, Inc.
//
//==============================================================================

// Version-dispatching accessors over the versioned Qnn_OpConfig_t /
// Qnn_Tensor_t unions. Each getter returns a safe default (NULL / 0 / *_INIT)
// for unknown versions; each by-pointer overload forwards to the by-reference
// form.

#pragma once

#include "QnnTypes.h"

#define QNN_OP_CFG_VALID(opConfig) ((opConfig).version == QNN_OPCONFIG_VERSION_1)

// Returns an op config initialized for the requested version.
inline Qnn_OpConfig_t createQnnOpConfig(const Qnn_OpConfigVersion_t version) {
  Qnn_OpConfig_t opConfig = QNN_OPCONFIG_INIT;
  opConfig.version        = version;
  if (version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1 = QNN_OPCONFIG_V1_INIT;
  }
  return opConfig;
}

// ---- Qnn_OpConfig_t getters -------------------------------------------------

inline const char* getQnnOpConfigName(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.name;
  }
  return NULL;
}

inline const char* getQnnOpConfigName(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigName(*opConfig);
}

inline const char* getQnnOpConfigPackageName(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.packageName;
  }
  return NULL;
}

inline const char* getQnnOpConfigPackageName(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigPackageName(*opConfig);
}

inline const char* getQnnOpConfigTypeName(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.typeName;
  }
  return NULL;
}

inline const char* getQnnOpConfigTypeName(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigTypeName(*opConfig);
}

inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.numOfParams;
  }
  return 0u;
}

inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigNumParams(*opConfig);
}

inline const Qnn_Param_t* getQnnOpConfigParams(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.params;
  }
  return NULL;
}

inline const Qnn_Param_t* getQnnOpConfigParams(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigParams(*opConfig);
}

inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.numOfInputs;
  }
  return 0u;
}

inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigNumInputs(*opConfig);
}

inline const Qnn_Tensor_t* getQnnOpConfigInputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.inputTensors;
  }
  return NULL;
}

inline const Qnn_Tensor_t* getQnnOpConfigInputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigInputs(*opConfig);
}

inline uint32_t getQnnOpConfigNumOutputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.numOfOutputs;
  }
  return 0u;
}

inline uint32_t getQnnOpConfigNumOutputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigNumOutputs(*opConfig);
}

inline const Qnn_Tensor_t* getQnnOpConfigOutputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.outputTensors;
  }
  return NULL;
}

inline const Qnn_Tensor_t* getQnnOpConfigOutputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigOutputs(*opConfig);
}

// ---- Qnn_OpConfig_t setters (no-ops for unknown versions) -------------------

inline void setQnnOpConfigName(Qnn_OpConfig_t& opConfig, const char* const name) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.name = name;
  }
}

inline void setQnnOpConfigName(Qnn_OpConfig_t* const opConfig, const char* const name) {
  setQnnOpConfigName(*opConfig, name);
}

inline void setQnnOpConfigPackageName(Qnn_OpConfig_t& opConfig, const char* const packageName) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.packageName = packageName;
  }
}

inline void setQnnOpConfigPackageName(Qnn_OpConfig_t* const opConfig,
                                      const char* const packageName) {
  setQnnOpConfigPackageName(*opConfig, packageName);
}

inline void setQnnOpConfigTypeName(Qnn_OpConfig_t& opConfig, const char* const typeName) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.typeName = typeName;
  }
}

inline void setQnnOpConfigTypeName(Qnn_OpConfig_t* const opConfig, const char* const typeName) {
  setQnnOpConfigTypeName(*opConfig, typeName);
}

inline void setQnnOpConfigParams(Qnn_OpConfig_t& opConfig,
                                 uint32_t const numOfParams,
                                 Qnn_Param_t* const params) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.numOfParams = numOfParams;
    opConfig.v1.params      = params;
  }
}

inline void setQnnOpConfigParams(Qnn_OpConfig_t* const opConfig,
                                 uint32_t const numOfParams,
                                 Qnn_Param_t* const params) {
  setQnnOpConfigParams(*opConfig, numOfParams, params);
}

inline void setQnnOpConfigInputs(Qnn_OpConfig_t& opConfig,
                                 uint32_t const numOfInputs,
                                 Qnn_Tensor_t* const inputTensors) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.numOfInputs  = numOfInputs;
    opConfig.v1.inputTensors = inputTensors;
  }
}

inline void setQnnOpConfigInputs(Qnn_OpConfig_t* const opConfig,
                                 uint32_t const numOfInputs,
                                 Qnn_Tensor_t* const inputTensors) {
  setQnnOpConfigInputs(*opConfig, numOfInputs, inputTensors);
}

inline void setQnnOpConfigOutputs(Qnn_OpConfig_t& opConfig,
                                  uint32_t const numOfOutputs,
                                  Qnn_Tensor_t* const outputTensors) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.numOfOutputs  = numOfOutputs;
    opConfig.v1.outputTensors = outputTensors;
  }
}

inline void setQnnOpConfigOutputs(Qnn_OpConfig_t* const opConfig,
                                  uint32_t const numOfOutputs,
                                  Qnn_Tensor_t* const outputTensors) {
  setQnnOpConfigOutputs(*opConfig, numOfOutputs, outputTensors);
}

// ---- Qnn_Tensor_t helpers ---------------------------------------------------

// Returns a tensor initialized for the requested version (v1 or v2).
inline Qnn_Tensor_t createQnnTensor(const Qnn_TensorVersion_t version) {
  Qnn_Tensor_t tensor = QNN_TENSOR_INIT;
  tensor.version      = version;
  if (version == QNN_TENSOR_VERSION_1) {
    tensor.v1 = QNN_TENSOR_V1_INIT;
  } else if (version == QNN_TENSOR_VERSION_2) {
    tensor.v2 = QNN_TENSOR_V2_INIT;
  }
  return tensor;
}

// The v1-field reads below skip the version check; per the original comments,
// "TensorCompatTest" guarantees v1/v2 layout compatibility for these fields.

inline uint32_t getQnnTensorId(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.id;
}

inline uint32_t getQnnTensorId(const Qnn_Tensor_t* const tensor) { return getQnnTensorId(*tensor); }

inline const char* getQnnTensorName(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.name;
}

inline const char* getQnnTensorName(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorName(*tensor);
}

inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.type;
}

inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorType(*tensor);
}

inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.dataFormat;
}

inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorDataFormat(*tensor);
}

inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.dataType;
}

inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorDataType(*tensor);
}

inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.quantizeParams;
}

inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t* const tensor) {
  if (tensor != nullptr) {
    return getQnnTensorQuantParams(*tensor);
  }
  return QNN_QUANTIZE_PARAMS_INIT;
}

inline uint32_t getQnnTensorRank(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.rank;
}

inline uint32_t getQnnTensorRank(const Qnn_Tensor_t* const tensor) {
  if (tensor != nullptr) {
    return getQnnTensorRank(*tensor);
  }
  return 0u;
}

inline uint32_t* getQnnTensorDimensions(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.dimensions;
}

inline uint32_t* getQnnTensorDimensions(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorDimensions(*tensor);
}

// v2-only field: v1 tensors have no dynamic-dimension mask.
inline uint8_t* getQnnTensorIsDynamicDimensions(const Qnn_Tensor_t& tensor) {
  if (tensor.version == QNN_TENSOR_VERSION_1) {
    return NULL;
  } else if (tensor.version == QNN_TENSOR_VERSION_2) {
    return tensor.v2.isDynamicDimensions;
  }
  return NULL;
}

inline uint8_t* getQnnTensorIsDynamicDimensions(const Qnn_Tensor_t* tensor) {
  return getQnnTensorIsDynamicDimensions(*tensor);
}

// v2-only field: v1 tensors report default-initialized sparse params.
inline Qnn_SparseParams_t getQnnTensorSparseParams(const Qnn_Tensor_t& tensor) {
  if (tensor.version == QNN_TENSOR_VERSION_1) {
    return QNN_SPARSE_PARAMS_INIT;
  } else if (tensor.version == QNN_TENSOR_VERSION_2) {
    return tensor.v2.sparseParams;
  }
  return QNN_SPARSE_PARAMS_INIT;
}

inline Qnn_SparseParams_t getQnnTensorSparseParams(const Qnn_Tensor_t* tensor) {
  return
getQnnTensorSparseParams(*tensor); +} + +inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t& tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.memType; +} + +inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t* const tensor) { + return getQnnTensorMemType(*tensor); +} + +inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t& tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.clientBuf; +} + +inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t* const tensor) { + return getQnnTensorClientBuf(*tensor); +} + +inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t& tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.memHandle; +} + +inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t* const tensor) { + return getQnnTensorMemHandle(*tensor); +} + +inline void setQnnTensorId(Qnn_Tensor_t& tensor, const uint32_t id) { + // TensorCompatTest justifies no need to check version + tensor.v1.id = id; +} + +inline void setQnnTensorId(Qnn_Tensor_t* const tensor, const uint32_t id) { + setQnnTensorId(*tensor, id); +} + +inline void setQnnTensorName(Qnn_Tensor_t& tensor, const char* const name) { + // TensorCompatTest justifies no need to check version + tensor.v1.name = name; +} + +inline void setQnnTensorName(Qnn_Tensor_t* const tensor, const char* const name) { + setQnnTensorName(*tensor, name); +} + +inline void setQnnTensorType(Qnn_Tensor_t& tensor, const Qnn_TensorType_t type) { + // TensorCompatTest justifies no need to check version + tensor.v1.type = type; +} + +inline void setQnnTensorType(Qnn_Tensor_t* const tensor, const Qnn_TensorType_t type) { + setQnnTensorType(*tensor, type); +} + +inline void setQnnTensorDataFormat(Qnn_Tensor_t& tensor, const Qnn_TensorDataFormat_t dataFormat) { + // TensorCompatTest justifies no need to check version + tensor.v1.dataFormat = dataFormat; +} + +inline 
void setQnnTensorDataFormat(Qnn_Tensor_t* const tensor, + const Qnn_TensorDataFormat_t format) { + setQnnTensorDataFormat(*tensor, format); +} + +inline void setQnnTensorDataType(Qnn_Tensor_t& tensor, const Qnn_DataType_t dataType) { + // TensorCompatTest justifies no need to check version + tensor.v1.dataType = dataType; +} + +inline void setQnnTensorDataType(Qnn_Tensor_t* const tensor, const Qnn_DataType_t dataType) { + setQnnTensorDataType(*tensor, dataType); +} + +inline void setQnnTensorQuantParams(Qnn_Tensor_t& tensor, + const Qnn_QuantizeParams_t quantizeParams) { + // TensorCompatTest justifies no need to check version + tensor.v1.quantizeParams = quantizeParams; +} + +inline void setQnnTensorQuantParams(Qnn_Tensor_t* const tensor, const Qnn_QuantizeParams_t params) { + setQnnTensorQuantParams(*tensor, params); +} + +inline void setQnnTensorRank(Qnn_Tensor_t& tensor, const uint32_t rank) { + // TensorCompatTest justifies no need to check version + tensor.v1.rank = rank; +} + +inline void setQnnTensorRank(Qnn_Tensor_t* const tensor, const uint32_t rank) { + setQnnTensorRank(*tensor, rank); +} + +inline void setQnnTensorDimensions(Qnn_Tensor_t& tensor, uint32_t* const dimensions) { + // TensorCompatTest justifies no need to check version + tensor.v1.dimensions = dimensions; +} + +inline void setQnnTensorDimensions(Qnn_Tensor_t* const tensor, uint32_t* const dimensions) { + setQnnTensorDimensions(*tensor, dimensions); +} + +inline void setQnnTensorIsDynamicDimensions(Qnn_Tensor_t& tensor, + uint8_t* const isDynamicDimensions) { + if (tensor.version == QNN_TENSOR_VERSION_2) { + tensor.v2.isDynamicDimensions = isDynamicDimensions; + } +} + +inline void setQnnTensorIsDynamicDimensions(Qnn_Tensor_t* tensor, + uint8_t* const isDynamicDimensions) { + setQnnTensorIsDynamicDimensions(*tensor, isDynamicDimensions); +} + +inline void setQnnTensorSparseParams(Qnn_Tensor_t& tensor, const Qnn_SparseParams_t sparseParams) { + if (tensor.version == QNN_TENSOR_VERSION_2) { + 
tensor.v2.sparseParams = sparseParams; + } +} + +inline void setQnnTensorSparseParams(Qnn_Tensor_t* tensor, Qnn_SparseParams_t sparseParams) { + setQnnTensorSparseParams(*tensor, sparseParams); +} + +inline void setQnnTensorMemType(Qnn_Tensor_t& tensor, const Qnn_TensorMemType_t memType) { + // TensorCompatTest justifies no need to check version + tensor.v1.memType = memType; +} + +inline void setQnnTensorMemType(Qnn_Tensor_t* const tensor, const Qnn_TensorMemType_t memType) { + setQnnTensorMemType(*tensor, memType); +} + +inline void setQnnTensorClientBuf(Qnn_Tensor_t& tensor, const Qnn_ClientBuffer_t clientBuf) { + // TensorCompatTest justifies no need to check version + tensor.v1.clientBuf = clientBuf; +} + +inline void setQnnTensorClientBuf(Qnn_Tensor_t* const tensor, const Qnn_ClientBuffer_t clientBuf) { + setQnnTensorClientBuf(*tensor, clientBuf); +} + +inline void setQnnTensorMemHandle(Qnn_Tensor_t& tensor, const Qnn_MemHandle_t memHandle) { + // TensorCompatTest justifies no need to check version + tensor.v1.memHandle = memHandle; +} + +inline void setQnnTensorMemHandle(Qnn_Tensor_t* const tensor, const Qnn_MemHandle_t handle) { + setQnnTensorMemHandle(*tensor, handle); +} + +inline Qnn_TensorSet_t createQnnTensorSet(const Qnn_TensorSetVersion_t version) { + Qnn_TensorSet_t tensorSet = QNN_TENSOR_SET_INIT; + tensorSet.version = version; + if (version == QNN_TENSOR_SET_VERSION_1) { + tensorSet.v1 = QNN_TENSOR_SET_V1_INIT; + } + return tensorSet; +} + +inline uint32_t getQnnTensorSetNumInputs(const Qnn_TensorSet_t& tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.numInputs; + } + return 0; +} + +inline uint32_t getQnnTensorSetNumInputs(const Qnn_TensorSet_t* tensorSet) { + return getQnnTensorSetNumInputs(*tensorSet); +} + +inline Qnn_Tensor_t* getQnnTensorSetInputTensors(const Qnn_TensorSet_t& tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.inputs; + } + return 0; +} + +inline 
Qnn_Tensor_t* getQnnTensorSetInputTensors(const Qnn_TensorSet_t* tensorSet) { + return getQnnTensorSetInputTensors(*tensorSet); +} + +inline uint32_t getQnnTensorSetNumOutputs(const Qnn_TensorSet_t& tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.numOutputs; + } + return 0; +} + +inline uint32_t getQnnTensorSetNumOutputs(const Qnn_TensorSet_t* tensorSet) { + return getQnnTensorSetNumOutputs(*tensorSet); +} + +inline Qnn_Tensor_t* getQnnTensorSetOutputTensors(const Qnn_TensorSet_t& tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.outputs; + } + return 0; +} + +inline Qnn_Tensor_t* getQnnTensorSetOutputTensors(const Qnn_TensorSet_t* tensorSet) { + return getQnnTensorSetOutputTensors(*tensorSet); +} + +inline void setQnnTensorSetInputTensors(Qnn_TensorSet_t& tensorSet, + Qnn_Tensor_t* inputTensors, + uint32_t const numInputs) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + tensorSet.v1.inputs = inputTensors; + tensorSet.v1.numInputs = numInputs; + } +} + +inline void setQnnTensorSetInputTensors(Qnn_TensorSet_t* tensorSet, + Qnn_Tensor_t* inputTensors, + uint32_t const numInputs) { + setQnnTensorSetInputTensors(*tensorSet, inputTensors, numInputs); +} + +inline void setQnnTensorSetOutputTensors(Qnn_TensorSet_t& tensorSet, + Qnn_Tensor_t* outputTensors, + const uint32_t numOutputs) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + tensorSet.v1.outputs = outputTensors; + tensorSet.v1.numOutputs = numOutputs; + } +} + +inline void setQnnTensorSetOutputTensors(Qnn_TensorSet_t* tensorSet, + Qnn_Tensor_t* outputTensors, + const uint32_t numOutputs) { + setQnnTensorSetOutputTensors(*tensorSet, outputTensors, numOutputs); +} + +// Creator for QNN Op Config +#define QNN_OP_CFG_CREATE(version) createQnnOpConfig(version) + +// Accessors for QNN Op Config +#define QNN_OP_CFG_GET_NAME(opConfig) getQnnOpConfigName(opConfig) +#define QNN_OP_CFG_GET_PACKAGE_NAME(opConfig) 
getQnnOpConfigPackageName(opConfig) +#define QNN_OP_CFG_GET_TYPE_NAME(opConfig) getQnnOpConfigTypeName(opConfig) +#define QNN_OP_CFG_GET_NUM_PARAMS(opConfig) getQnnOpConfigNumParams(opConfig) +#define QNN_OP_CFG_GET_PARAMS(opConfig) getQnnOpConfigParams(opConfig) +#define QNN_OP_CFG_GET_NUM_INPUTS(opConfig) getQnnOpConfigNumInputs(opConfig) +#define QNN_OP_CFG_GET_INPUTS(opConfig) getQnnOpConfigInputs(opConfig) +#define QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig) getQnnOpConfigNumOutputs(opConfig) +#define QNN_OP_CFG_GET_OUTPUTS(opConfig) getQnnOpConfigOutputs(opConfig) + +// Modifiers for QNN Op Config +#define QNN_OP_CFG_SET_NAME(opConfig, value) setQnnOpConfigName(opConfig, value) +#define QNN_OP_CFG_SET_PACKAGE_NAME(opConfig, value) setQnnOpConfigPackageName(opConfig, value) +#define QNN_OP_CFG_SET_TYPE_NAME(opConfig, value) setQnnOpConfigTypeName(opConfig, value) +#define QNN_OP_CFG_SET_PARAMS(opConfig, numOfParams, params) \ + setQnnOpConfigParams(opConfig, numOfParams, params) +#define QNN_OP_CFG_SET_INPUTS(opConfig, numOfInputs, inputTensors) \ + setQnnOpConfigInputs(opConfig, numOfInputs, inputTensors) +#define QNN_OP_CFG_SET_OUTPUTS(opConfig, numOfOutputs, outputTensors) \ + setQnnOpConfigOutputs(opConfig, numOfOutputs, outputTensors) + +// Creator for QNN Tensor +#define QNN_TENSOR_CREATE(version) createQnnTensor(version) + +// Accessors for QNN Tensor +#define QNN_TENSOR_GET_ID(tensor) getQnnTensorId(tensor) +#define QNN_TENSOR_GET_NAME(tensor) getQnnTensorName(tensor) +#define QNN_TENSOR_GET_TYPE(tensor) getQnnTensorType(tensor) +#define QNN_TENSOR_GET_DATA_FORMAT(tensor) getQnnTensorDataFormat(tensor) +#define QNN_TENSOR_GET_DATA_TYPE(tensor) getQnnTensorDataType(tensor) +#define QNN_TENSOR_GET_QUANT_PARAMS(tensor) getQnnTensorQuantParams(tensor) +#define QNN_TENSOR_GET_RANK(tensor) getQnnTensorRank(tensor) +#define QNN_TENSOR_GET_DIMENSIONS(tensor) getQnnTensorDimensions(tensor) +#define QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor) 
getQnnTensorIsDynamicDimensions(tensor) +#define QNN_TENSOR_GET_SPARSE_PARAMS(tensor) getQnnTensorSparseParams(tensor) +#define QNN_TENSOR_GET_MEM_TYPE(tensor) getQnnTensorMemType(tensor) +#define QNN_TENSOR_GET_CLIENT_BUF(tensor) getQnnTensorClientBuf(tensor) +#define QNN_TENSOR_GET_MEM_HANDLE(tensor) getQnnTensorMemHandle(tensor) + +// Modifiers for QNN Tensor +#define QNN_TENSOR_SET_ID(tensor, value) setQnnTensorId(tensor, value) +#define QNN_TENSOR_SET_NAME(tensor, value) setQnnTensorName(tensor, value) +#define QNN_TENSOR_SET_TYPE(tensor, value) setQnnTensorType(tensor, value) +#define QNN_TENSOR_SET_DATA_FORMAT(tensor, value) setQnnTensorDataFormat(tensor, value) +#define QNN_TENSOR_SET_DATA_TYPE(tensor, value) setQnnTensorDataType(tensor, value) +#define QNN_TENSOR_SET_QUANT_PARAMS(tensor, value) setQnnTensorQuantParams(tensor, value) +#define QNN_TENSOR_SET_RANK(tensor, value) setQnnTensorRank(tensor, value) +#define QNN_TENSOR_SET_DIMENSIONS(tensor, value) setQnnTensorDimensions(tensor, value) +#define QNN_TENSOR_SET_IS_DYNAMIC_DIMENSIONS(tensor, value) \ + setQnnTensorIsDynamicDimensions(tensor, value) +#define QNN_TENSOR_SET_SPARSE_PARAMS(tensor, value) setQnnTensorSparseParams(tensor, value) +#define QNN_TENSOR_SET_MEM_TYPE(tensor, value) setQnnTensorMemType(tensor, value) +#define QNN_TENSOR_SET_CLIENT_BUF(tensor, value) setQnnTensorClientBuf(tensor, value) +#define QNN_TENSOR_SET_MEM_HANDLE(tensor, value) setQnnTensorMemHandle(tensor, value) + +// Creator for QNN Tensor Set +#define QNN_TENSORSET_CREATE(version) createQnnTensorSet(version) + +// Accessors for QNN Tensor Set +#define QNN_TENSORSET_GET_NUM_INPUTS(tensorSet) getQnnTensorSetNumInputs(tensorSet) +#define QNN_TENSORSET_GET_INPUT_TENSORS(tensorSet) getQnnTensorSetInputTensors(tensorSet) +#define QNN_TENSORSET_GET_NUM_OUTPUTS(tensorSet) getQnnTensorSetNumOutputs(tensorSet) +#define QNN_TENSORSET_GET_OUTPUT_TENSORS(tensorSet) getQnnTensorSetOutputTensors(tensorSet) + +// Modifiers for QNN 
Tensor Set +#define QNN_TENSORSET_SET_INPUT_TENSORS(tensorSet, inputTensors, numInputs) \ + setQnnTensorSetInputTensors(tensorSet, inputTensors, numInputs) +#define QNN_TENSORSET_SET_OUTPUT_TENSORS(tensorSet, outputTensors, numOutputs) \ + setQnnTensorSetOutputTensors(tensorSet, outputTensors, numOutputs) + +inline bool isQnnTensorV1Compatible(const Qnn_Tensor_t& tensor) { + if (tensor.version == QNN_TENSOR_VERSION_2) { + if (tensor.v2.isDynamicDimensions != NULL) { + return false; + } + + if (tensor.v2.dataFormat == QNN_TENSOR_DATA_FORMAT_SPARSE) { + return false; + } + } + + return true; +} + +inline bool isQnnTensorV1Compatible(const Qnn_Tensor_t* const tensor) { + return isQnnTensorV1Compatible(*tensor); +} + +inline bool isQnnTensorV1Compatible(const Qnn_OpConfig_t& opConfig) { + if ((QNN_OP_CFG_GET_INPUTS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_INPUTS(opConfig) > 0u)) { + for (uint32_t tensorIdx = 0u; tensorIdx < QNN_OP_CFG_GET_NUM_INPUTS(opConfig); tensorIdx++) { + if (!isQnnTensorV1Compatible(QNN_OP_CFG_GET_INPUTS(opConfig)[tensorIdx])) { + return false; + } + } + } + if ((QNN_OP_CFG_GET_OUTPUTS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig) > 0u)) { + for (uint32_t tensorIdx = 0u; tensorIdx < QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig); tensorIdx++) { + if (!isQnnTensorV1Compatible(QNN_OP_CFG_GET_OUTPUTS(opConfig)[tensorIdx])) { + return false; + } + } + } + if ((QNN_OP_CFG_GET_PARAMS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_PARAMS(opConfig) > 0)) { + for (uint32_t paramIdx = 0u; paramIdx < QNN_OP_CFG_GET_NUM_PARAMS(opConfig); paramIdx++) { + const Qnn_Param_t& param = QNN_OP_CFG_GET_PARAMS(opConfig)[paramIdx]; + if (QNN_PARAMTYPE_TENSOR == param.paramType) { + if (!isQnnTensorV1Compatible(param.tensorParam)) { + return false; + } + } + } + } + + return true; +} + +inline bool isQnnTensorV1Compatible(const Qnn_OpConfig_t* const opConfig) { + return isQnnTensorV1Compatible(*opConfig); +} diff --git a/SampleApp/src/SampleApp.hpp 
b/SampleApp/src/SampleApp.hpp new file mode 100644 index 0000000000000000000000000000000000000000..dc0ba8926deaa870b4ddcba4a81d7263b66a7624 --- /dev/null +++ b/SampleApp/src/SampleApp.hpp @@ -0,0 +1,43 @@ +//============================================================================== +// +// Copyright (c) 2020-2023 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#pragma once + +#include "QnnInterface.h" +#include "QnnWrapperUtils.hpp" +#include "System/QnnSystemInterface.h" + +namespace qnn { +namespace tools { +namespace sample_app { + +// Graph Related Function Handle Types +typedef qnn_wrapper_api::ModelError_t (*ComposeGraphsFnHandleType_t)( + Qnn_BackendHandle_t, + QNN_INTERFACE_VER_TYPE, + Qnn_ContextHandle_t, + const qnn_wrapper_api::GraphConfigInfo_t **, + const uint32_t, + qnn_wrapper_api::GraphInfo_t ***, + uint32_t *, + bool, + QnnLog_Callback_t, + QnnLog_Level_t); +typedef qnn_wrapper_api::ModelError_t (*FreeGraphInfoFnHandleType_t)( + qnn_wrapper_api::GraphInfo_t ***, uint32_t); + +typedef struct QnnFunctionPointers { + ComposeGraphsFnHandleType_t composeGraphsFnHandle; + FreeGraphInfoFnHandleType_t freeGraphInfoFnHandle; + QNN_INTERFACE_VER_TYPE qnnInterface; + QNN_SYSTEM_INTERFACE_VER_TYPE qnnSystemInterface; +} QnnFunctionPointers; + +} // namespace sample_app +} // namespace tools +} // namespace qnn diff --git a/SampleApp/src/Utils/BuildId.hpp b/SampleApp/src/Utils/BuildId.hpp new file mode 100644 index 0000000000000000000000000000000000000000..320e8215436f9e1bd850978639bc2c61de6ee246 --- /dev/null +++ b/SampleApp/src/Utils/BuildId.hpp @@ -0,0 +1,17 @@ +//============================================================================== +// +// Copyright (c) 2020, 2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#pragma once + +namespace qnn { +namespace tools { + +inline std::string getBuildId() { return std::string("v2.24.0.240626131148_96320"); } + +} // namespace tools +} // namespace qnn diff --git a/SampleApp/src/Utils/DataUtil.cpp b/SampleApp/src/Utils/DataUtil.cpp new file mode 100644 index 0000000000000000000000000000000000000000..14381c7da501c92efa5d7ae8279523652637c8fa --- /dev/null +++ b/SampleApp/src/Utils/DataUtil.cpp @@ -0,0 +1,408 @@ +//============================================================================== +// +// Copyright (c) 2019-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== +#include +#include +#include +#include +#include + +#include "DataUtil.hpp" +#include "Logger.hpp" +#ifndef __hexagon__ +#include "PAL/Directory.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" +#endif + +using namespace qnn; +using namespace qnn::tools; + +std::tuple datautil::getDataTypeSizeInBytes(Qnn_DataType_t dataType) { + if (g_dataTypeToSize.find(dataType) == g_dataTypeToSize.end()) { + QNN_ERROR("Invalid qnn data type provided"); + return std::make_tuple(StatusCode::INVALID_DATA_TYPE, 0); + } + return std::make_tuple(StatusCode::SUCCESS, g_dataTypeToSize.find(dataType)->second); +} + +size_t datautil::calculateElementCount(std::vector dims) { + if (dims.size() == 0) { + return 0; + } + return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); +} + +std::tuple datautil::calculateLength(std::vector dims, + Qnn_DataType_t dataType) { + if (dims.size() == 0) { + QNN_ERROR("dims.size() is zero"); + return std::make_tuple(StatusCode::INVALID_DIMENSIONS, 0); + } + StatusCode returnStatus{StatusCode::SUCCESS}; + size_t length{0}; + std::tie(returnStatus, length) = getDataTypeSizeInBytes(dataType); + if 
(StatusCode::SUCCESS != returnStatus) { + return std::make_tuple(returnStatus, 0); + } + length *= calculateElementCount(dims); + return std::make_tuple(StatusCode::SUCCESS, length); +} + +datautil::StatusCode datautil::readDataFromFile(std::string filePath, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t* buffer) { + if (nullptr == buffer) { + QNN_ERROR("buffer is nullptr"); + return StatusCode::INVALID_BUFFER; + } + std::ifstream in(filePath, std::ifstream::binary); + if (!in) { + QNN_ERROR("Failed to open input file: %s", filePath.c_str()); + return StatusCode::FILE_OPEN_FAIL; + } + in.seekg(0, in.end); + const size_t length = in.tellg(); + in.seekg(0, in.beg); + StatusCode err{StatusCode::SUCCESS}; + size_t l{0}; + std::tie(err, l) = datautil::calculateLength(dims, dataType); + if (StatusCode::SUCCESS != err) { + return err; + } + if (length != l) { + QNN_ERROR("Input file %s: file size in bytes (%d), should be equal to: %d", + filePath.c_str(), + length, + l); + return StatusCode::DATA_SIZE_MISMATCH; + } + + if (!in.read(reinterpret_cast(buffer), length)) { + QNN_ERROR("Failed to read the contents of: %s", filePath.c_str()); + return StatusCode::DATA_READ_FAIL; + } + return StatusCode::SUCCESS; +} + +datautil::ReadBatchDataRetType_t datautil::readBatchData(const std::vector& filePaths, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + const std::vector& dims, + const Qnn_DataType_t dataType, + uint8_t* buffer) { + if (nullptr == buffer) { + QNN_ERROR("buffer is nullptr"); + return std::make_tuple(StatusCode::INVALID_BUFFER, 0, 0); + } + StatusCode err{StatusCode::SUCCESS}; + size_t tensorLength{0}; + std::tie(err, tensorLength) = datautil::calculateLength(dims, dataType); + if (StatusCode::SUCCESS != err) { + return std::make_tuple(err, 0, 0); + } + size_t numInputsCopied = 0; + size_t numBatchSize = 0; + size_t totalLength = 0; + size_t fileIndex = filePathsIndexOffset; + while (true) { + if (fileIndex >= filePaths.size()) { + if 
(loopBackToStart) { + fileIndex = fileIndex % filePaths.size(); + } else { + numBatchSize += (tensorLength - totalLength) / (totalLength / numBatchSize); + // pad the vector with zeros + memset(buffer + totalLength, 0, (tensorLength - totalLength) * sizeof(char)); + break; + } + } + std::ifstream in(filePaths[fileIndex], std::ifstream::binary); + if (!in) { + QNN_ERROR("Failed to open input file: %s", (filePaths[fileIndex]).c_str()); + return std::make_tuple(StatusCode::FILE_OPEN_FAIL, numInputsCopied, numBatchSize); + } + in.seekg(0, in.end); + const size_t fileSize = in.tellg(); + in.seekg(0, in.beg); + if ((tensorLength % fileSize) != 0 || fileSize > tensorLength || fileSize == 0) { + QNN_ERROR( + "Given input file %s with file size in bytes %d. If the model expects a batch size of " + "one, the file size should match the tensor extent: %d bytes. If the model expects a " + "batch size > 1, the file size should evenly divide the tensor extent: %d bytes.", + filePaths[fileIndex].c_str(), + fileSize, + tensorLength, + tensorLength); + return std::make_tuple(StatusCode::DATA_SIZE_MISMATCH, numInputsCopied, numBatchSize); + } + if (!in.read(reinterpret_cast(buffer + (numInputsCopied * fileSize)), fileSize)) { + QNN_ERROR("Failed to read the contents of: %s", filePaths.front().c_str()); + return std::make_tuple(StatusCode::DATA_READ_FAIL, numInputsCopied, numBatchSize); + } + totalLength += fileSize; + numInputsCopied += 1; + numBatchSize += 1; + fileIndex += 1; + if (totalLength >= tensorLength) { + break; + } + } + return std::make_tuple(StatusCode::SUCCESS, numInputsCopied, numBatchSize); +} + +std::tuple datautil::getFileSize(std::string filePath) { + std::ifstream in(filePath, std::ifstream::binary); + if (!in) { + QNN_ERROR("Failed to open input file: %s", filePath.c_str()); + return std::make_tuple(StatusCode::FILE_OPEN_FAIL, 0); + } + in.seekg(0, in.end); + const size_t length = in.tellg(); + in.seekg(0, in.beg); + return std::make_tuple(StatusCode::SUCCESS, 
length); +} + +datautil::StatusCode datautil::readBinaryFromFile(std::string filePath, + uint8_t* buffer, + size_t bufferSize) { + if (nullptr == buffer) { + QNN_ERROR("buffer is nullptr"); + return StatusCode::INVALID_BUFFER; + } + std::ifstream in(filePath, std::ifstream::binary); + if (!in) { + QNN_ERROR("Failed to open input file: %s", filePath.c_str()); + return StatusCode::FILE_OPEN_FAIL; + } + if (!in.read(reinterpret_cast(buffer), bufferSize)) { + QNN_ERROR("Failed to read the contents of: %s", filePath.c_str()); + return StatusCode::DATA_READ_FAIL; + } + return StatusCode::SUCCESS; +} + +#ifndef __hexagon__ +datautil::StatusCode datautil::writeDataToFile(std::string fileDir, + std::string fileName, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t* buffer) { + if (nullptr == buffer) { + QNN_ERROR("buffer is nullptr"); + return StatusCode::INVALID_BUFFER; + } + if (!pal::Directory::makePath(fileDir)) { + QNN_ERROR("Failed to create output directory: %s", fileDir.c_str()); + return StatusCode::DIRECTORY_CREATE_FAIL; + } + const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); + std::ofstream os(outputPath, std::ofstream::binary); + if (!os) { + QNN_ERROR("Failed to open output file for writing: %s", outputPath.c_str()); + return StatusCode::FILE_OPEN_FAIL; + } + StatusCode err{StatusCode::SUCCESS}; + size_t length{0}; + std::tie(err, length) = datautil::calculateLength(dims, dataType); + if (StatusCode::SUCCESS != err) { + return err; + } + for (size_t l = 0; l < length; l++) { + os.write(reinterpret_cast(&(*(buffer + l))), 1); + } + return StatusCode::SUCCESS; +} + +datautil::StatusCode datautil::writeBatchDataToFile(std::vector fileDirs, + std::string fileName, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t* buffer, + const size_t batchSize) { + if (nullptr == buffer) { + QNN_ERROR("buffer is nullptr"); + return StatusCode::INVALID_BUFFER; + } + StatusCode err{StatusCode::SUCCESS}; + size_t length{0}; + std::tie(err, 
length) = datautil::calculateLength(dims, dataType); + if (StatusCode::SUCCESS != err) { + return err; + } + auto outputSize = (length / batchSize); + for (size_t batchIndex = 0; batchIndex < fileDirs.size(); batchIndex++) { + std::string fileDir = fileDirs[batchIndex]; + if (!pal::Directory::makePath(fileDir)) { + QNN_ERROR("Failed to create output directory: %s", fileDir.c_str()); + return StatusCode::DIRECTORY_CREATE_FAIL; + } + const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); + std::ofstream os(outputPath, std::ofstream::binary); + if (!os) { + QNN_ERROR("Failed to open output file for writing: %s", outputPath.c_str()); + return StatusCode::FILE_OPEN_FAIL; + } + for (size_t l = 0; l < outputSize; l++) { + size_t bufferIndex = l + (batchIndex * outputSize); + os.write(reinterpret_cast(&(*(buffer + bufferIndex))), 1); + } + } + return StatusCode::SUCCESS; +} + +datautil::StatusCode datautil::writeBinaryToFile(std::string fileDir, + std::string fileName, + uint8_t* buffer, + size_t bufferSize) { + if (nullptr == buffer) { + QNN_ERROR("buffer is nullptr"); + return StatusCode::INVALID_BUFFER; + } + if (!pal::Directory::makePath(fileDir)) { + QNN_ERROR("Failed to create output directory: %s", fileDir.c_str()); + return StatusCode::DIRECTORY_CREATE_FAIL; + } + const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); + std::ofstream os(outputPath, std::ofstream::binary); + if (!os) { + QNN_ERROR("Failed to open output file for writing: %s", outputPath.c_str()); + return StatusCode::FILE_OPEN_FAIL; + } + os.write(reinterpret_cast(buffer), bufferSize); + return StatusCode::SUCCESS; +} +#endif + +template +datautil::StatusCode datautil::floatToTfN( + T_QuantType* out, float* in, int32_t offset, float scale, size_t numElements) { + static_assert(std::is_unsigned::value, "floatToTfN supports unsigned only!"); + + if (nullptr == out || nullptr == in) { + QNN_ERROR("Received a nullptr"); + return StatusCode::INVALID_BUFFER; + 
} + + size_t dataTypeSizeInBytes = sizeof(T_QuantType); + size_t bitWidth = dataTypeSizeInBytes * g_bitsPerByte; + double trueBitWidthMax = pow(2, bitWidth) - 1; + double encodingMin = offset * scale; + double encodingMax = (trueBitWidthMax + offset) * scale; + double encodingRange = encodingMax - encodingMin; + + for (size_t i = 0; i < numElements; ++i) { + int quantizedValue = round(trueBitWidthMax * (in[i] - encodingMin) / encodingRange); + if (quantizedValue < 0) + quantizedValue = 0; + else if (quantizedValue > (int)trueBitWidthMax) + quantizedValue = (int)trueBitWidthMax; + out[i] = static_cast(quantizedValue); + } + return StatusCode::SUCCESS; +} + +template datautil::StatusCode datautil::floatToTfN( + uint8_t* out, float* in, int32_t offset, float scale, size_t numElements); + +template datautil::StatusCode datautil::floatToTfN( + uint16_t* out, float* in, int32_t offset, float scale, size_t numElements); + +template +datautil::StatusCode datautil::tfNToFloat( + float* out, T_QuantType* in, int32_t offset, float scale, size_t numElements) { + static_assert(std::is_unsigned::value, "tfNToFloat supports unsigned only!"); + + if (nullptr == out || nullptr == in) { + QNN_ERROR("Received a nullptr"); + return StatusCode::INVALID_BUFFER; + } + for (size_t i = 0; i < numElements; i++) { + double quantizedValue = static_cast(in[i]); + double offsetDouble = static_cast(offset); + out[i] = static_cast((quantizedValue + offsetDouble) * scale); + } + return StatusCode::SUCCESS; +} + +template datautil::StatusCode datautil::tfNToFloat( + float* out, uint8_t* in, int32_t offset, float scale, size_t numElements); + +template datautil::StatusCode datautil::tfNToFloat( + float* out, uint16_t* in, int32_t offset, float scale, size_t numElements); + +template +datautil::StatusCode datautil::castToFloat(float* out, T_QuantType* in, size_t numElements) { + if (nullptr == out || nullptr == in) { + QNN_ERROR("Received a nullptr"); + return StatusCode::INVALID_BUFFER; + } + for 
(size_t i = 0; i < numElements; i++) { + out[i] = static_cast(in[i]); + } + return StatusCode::SUCCESS; +} + +template datautil::StatusCode datautil::castToFloat(float* out, + uint8_t* in, + size_t numElements); + +template datautil::StatusCode datautil::castToFloat(float* out, + uint16_t* in, + size_t numElements); + +template datautil::StatusCode datautil::castToFloat(float* out, + uint32_t* in, + size_t numElements); + +template datautil::StatusCode datautil::castToFloat(float* out, + int8_t* in, + size_t numElements); + +template datautil::StatusCode datautil::castToFloat(float* out, + int16_t* in, + size_t numElements); + +template datautil::StatusCode datautil::castToFloat(float* out, + int32_t* in, + size_t numElements); + +template +datautil::StatusCode datautil::castFromFloat(T_QuantType* out, float* in, size_t numElements) { + if (nullptr == out || nullptr == in) { + QNN_ERROR("Received a nullptr"); + return StatusCode::INVALID_BUFFER; + } + for (size_t i = 0; i < numElements; i++) { + out[i] = static_cast(in[i]); + } + return StatusCode::SUCCESS; +} + +template datautil::StatusCode datautil::castFromFloat(uint8_t* out, + float* in, + size_t numElements); + +template datautil::StatusCode datautil::castFromFloat(uint16_t* out, + float* in, + size_t numElements); + +template datautil::StatusCode datautil::castFromFloat(uint32_t* out, + float* in, + size_t numElements); + +template datautil::StatusCode datautil::castFromFloat(int8_t* out, + float* in, + size_t numElements); + +template datautil::StatusCode datautil::castFromFloat(int16_t* out, + float* in, + size_t numElements); + +template datautil::StatusCode datautil::castFromFloat(int32_t* out, + float* in, + size_t numElements); \ No newline at end of file diff --git a/SampleApp/src/Utils/DataUtil.hpp b/SampleApp/src/Utils/DataUtil.hpp new file mode 100644 index 0000000000000000000000000000000000000000..e25b6fc096129e61e6c6fbb1e1b236b5dba65c5f --- /dev/null +++ b/SampleApp/src/Utils/DataUtil.hpp @@ -0,0 
+1,127 @@ +//============================================================================== +// +// Copyright (c) 2019-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== +#pragma once + +#include +#include +#include + +#include "QnnTypes.h" + +namespace qnn { +namespace tools { +namespace datautil { +enum class StatusCode { + SUCCESS, + DATA_READ_FAIL, + DATA_WRITE_FAIL, + FILE_OPEN_FAIL, + DIRECTORY_CREATE_FAIL, + INVALID_DIMENSIONS, + INVALID_DATA_TYPE, + DATA_SIZE_MISMATCH, + INVALID_BUFFER, +}; + +const size_t g_bitsPerByte = 8; + +using ReadBatchDataRetType_t = std::tuple; + +std::tuple getDataTypeSizeInBytes(Qnn_DataType_t dataType); + +std::tuple calculateLength(std::vector dims, Qnn_DataType_t dataType); + +size_t calculateElementCount(std::vector dims); + +std::tuple getFileSize(std::string filePath); + +StatusCode readDataFromFile(std::string filePath, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t* buffer); + +/* + * Read data in batches from vector and try to matches the model input's + * batches. 
If the vector is empty while matching the batch size of model, + * pad the remaining buffer with zeros + * @param filePaths image paths vector + * @param filePathsIndexOffset index offset in the vector + * @param loopBackToStart loop the vector to fill the remaining tensor data + * @param dims model input dimensions + * @param dataType to create input buffer from file + * @param buffer to fill the input image data + * + * @return ReadBatchDataRetType_t returns numFilesCopied and batchSize along + * with status + */ +ReadBatchDataRetType_t readBatchData(const std::vector& filePaths, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + const std::vector& dims, + const Qnn_DataType_t dataType, + uint8_t* buffer); + +StatusCode readBinaryFromFile(std::string filePath, uint8_t* buffer, size_t bufferSize); + +#ifndef __hexagon__ +StatusCode writeDataToFile(std::string fileDir, + std::string fileName, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t* buffer); + +StatusCode writeBatchDataToFile(std::vector fileDirs, + std::string fileName, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t* buffer, + const size_t batchSize); + +StatusCode writeBinaryToFile(std::string fileDir, + std::string fileName, + uint8_t* buffer, + size_t bufferSize); +#endif + +template +datautil::StatusCode floatToTfN( + T_QuantType* out, float* in, int32_t offset, float scale, size_t numElements); + +template +datautil::StatusCode tfNToFloat( + float* out, T_QuantType* in, int32_t offset, float scale, size_t numElements); + +template +datautil::StatusCode castToFloat(float* out, T_QuantType* in, size_t numElements); + +template +datautil::StatusCode castFromFloat(T_QuantType* out, float* in, size_t numElements); + +const std::map g_dataTypeToSize = { + {QNN_DATATYPE_INT_8, 1}, + {QNN_DATATYPE_INT_16, 2}, + {QNN_DATATYPE_INT_32, 4}, + {QNN_DATATYPE_INT_64, 8}, + {QNN_DATATYPE_UINT_8, 1}, + {QNN_DATATYPE_UINT_16, 2}, + {QNN_DATATYPE_UINT_32, 4}, + {QNN_DATATYPE_UINT_64, 
8}, + {QNN_DATATYPE_FLOAT_16, 2}, + {QNN_DATATYPE_FLOAT_32, 4}, + {QNN_DATATYPE_FLOAT_64, 8}, + {QNN_DATATYPE_SFIXED_POINT_8, 1}, + {QNN_DATATYPE_SFIXED_POINT_16, 2}, + {QNN_DATATYPE_SFIXED_POINT_32, 4}, + {QNN_DATATYPE_UFIXED_POINT_8, 1}, + {QNN_DATATYPE_UFIXED_POINT_16, 2}, + {QNN_DATATYPE_UFIXED_POINT_32, 4}, + {QNN_DATATYPE_BOOL_8, 1}, +}; +} // namespace datautil +} // namespace tools +} // namespace qnn diff --git a/SampleApp/src/Utils/DynamicLoadUtil.cpp b/SampleApp/src/Utils/DynamicLoadUtil.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c0b4bc4f418b9120150e1d3a8d1e6ddb8a5606e5 --- /dev/null +++ b/SampleApp/src/Utils/DynamicLoadUtil.cpp @@ -0,0 +1,179 @@ +//============================================================================== +// +// Copyright (c) 2019-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#include + +#include "DynamicLoadUtil.hpp" +#include "Logger.hpp" +#include "PAL/DynamicLoading.hpp" + +using namespace qnn; +using namespace qnn::tools; + +typedef Qnn_ErrorHandle_t (*QnnInterfaceGetProvidersFn_t)(const QnnInterface_t*** providerList, + uint32_t* numProviders); + +typedef Qnn_ErrorHandle_t (*QnnSystemInterfaceGetProvidersFn_t)( + const QnnSystemInterface_t*** providerList, uint32_t* numProviders); + +template +static inline T resolveSymbol(void* libHandle, const char* sym) { + T ptr = (T)pal::dynamicloading::dlSym(libHandle, sym); + if (ptr == nullptr) { + QNN_ERROR("Unable to access symbol [%s]. 
pal::dynamicloading::dlError(): %s", + sym, + pal::dynamicloading::dlError()); + } + return ptr; +} + +dynamicloadutil::StatusCode dynamicloadutil::getQnnFunctionPointers( + std::string backendPath, + std::string modelPath, + sample_app::QnnFunctionPointers* qnnFunctionPointers, + void** backendHandleRtn, + bool loadModelLib, + void** modelHandleRtn) { +#if defined(__ANDROID__) + void* libBackendHandle = pal::dynamicloading::dlOpen( + backendPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); +#else + void* libBackendHandle = pal::dynamicloading::dlOpen( + backendPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_GLOBAL); +#endif + if (nullptr == libBackendHandle) { + QNN_ERROR("Unable to load backend. pal::dynamicloading::dlError(): %s", + pal::dynamicloading::dlError()); + return StatusCode::FAIL_LOAD_BACKEND; + } + if (nullptr != backendHandleRtn) { + *backendHandleRtn = libBackendHandle; + } + // Get QNN Interface + QnnInterfaceGetProvidersFn_t getInterfaceProviders{nullptr}; + getInterfaceProviders = + resolveSymbol(libBackendHandle, "QnnInterface_getProviders"); + if (nullptr == getInterfaceProviders) { + return StatusCode::FAIL_SYM_FUNCTION; + } + QnnInterface_t** interfaceProviders{nullptr}; + uint32_t numProviders{0}; + if (QNN_SUCCESS != + getInterfaceProviders((const QnnInterface_t***)&interfaceProviders, &numProviders)) { + QNN_ERROR("Failed to get interface providers."); + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + if (nullptr == interfaceProviders) { + QNN_ERROR("Failed to get interface providers: null interface providers received."); + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + if (0 == numProviders) { + QNN_ERROR("Failed to get interface providers: 0 interface providers."); + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + bool foundValidInterface{false}; + for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { + if (QNN_API_VERSION_MAJOR == 
interfaceProviders[pIdx]->apiVersion.coreApiVersion.major && + QNN_API_VERSION_MINOR <= interfaceProviders[pIdx]->apiVersion.coreApiVersion.minor) { + foundValidInterface = true; + qnnFunctionPointers->qnnInterface = interfaceProviders[pIdx]->QNN_INTERFACE_VER_NAME; + break; + } + } + if (!foundValidInterface) { + QNN_ERROR("Unable to find a valid interface."); + libBackendHandle = nullptr; + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + + if (true == loadModelLib) { + QNN_INFO("Loading model shared library ([model].so)"); + void* libModelHandle = pal::dynamicloading::dlOpen( + modelPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); + if (nullptr == libModelHandle) { + QNN_ERROR("Unable to load model. pal::dynamicloading::dlError(): %s", + pal::dynamicloading::dlError()); + return StatusCode::FAIL_LOAD_MODEL; + } + if (nullptr != modelHandleRtn) { + *modelHandleRtn = libModelHandle; + } + + std::string modelPrepareFunc = "QnnModel_composeGraphs"; + qnnFunctionPointers->composeGraphsFnHandle = + resolveSymbol(libModelHandle, + modelPrepareFunc.c_str()); + if (nullptr == qnnFunctionPointers->composeGraphsFnHandle) { + return StatusCode::FAIL_SYM_FUNCTION; + } + + std::string modelFreeFunc = "QnnModel_freeGraphsInfo"; + qnnFunctionPointers->freeGraphInfoFnHandle = + resolveSymbol(libModelHandle, + modelFreeFunc.c_str()); + if (nullptr == qnnFunctionPointers->freeGraphInfoFnHandle) { + return StatusCode::FAIL_SYM_FUNCTION; + } + } else { + QNN_INFO("Model wasn't loaded from a shared library."); + } + return StatusCode::SUCCESS; +} + +dynamicloadutil::StatusCode dynamicloadutil::getQnnSystemFunctionPointers( + std::string systemLibraryPath, sample_app::QnnFunctionPointers* qnnFunctionPointers) { + QNN_FUNCTION_ENTRY_LOG; + if (!qnnFunctionPointers) { + QNN_ERROR("nullptr provided for qnnFunctionPointers"); + return StatusCode::FAILURE; + } + void* systemLibraryHandle = pal::dynamicloading::dlOpen( + systemLibraryPath.c_str(), 
pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); + if (nullptr == systemLibraryHandle) { + QNN_ERROR("Unable to load system library. pal::dynamicloading::dlError(): %s", + pal::dynamicloading::dlError()); + return StatusCode::FAIL_LOAD_SYSTEM_LIB; + } + QnnSystemInterfaceGetProvidersFn_t getSystemInterfaceProviders{nullptr}; + getSystemInterfaceProviders = resolveSymbol( + systemLibraryHandle, "QnnSystemInterface_getProviders"); + if (nullptr == getSystemInterfaceProviders) { + return StatusCode::FAIL_SYM_FUNCTION; + } + QnnSystemInterface_t** systemInterfaceProviders{nullptr}; + uint32_t numProviders{0}; + if (QNN_SUCCESS != getSystemInterfaceProviders( + (const QnnSystemInterface_t***)&systemInterfaceProviders, &numProviders)) { + QNN_ERROR("Failed to get system interface providers."); + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + if (nullptr == systemInterfaceProviders) { + QNN_ERROR("Failed to get system interface providers: null interface providers received."); + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + if (0 == numProviders) { + QNN_ERROR("Failed to get interface providers: 0 interface providers."); + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + bool foundValidSystemInterface{false}; + for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { + if (QNN_SYSTEM_API_VERSION_MAJOR == systemInterfaceProviders[pIdx]->systemApiVersion.major && + QNN_SYSTEM_API_VERSION_MINOR <= systemInterfaceProviders[pIdx]->systemApiVersion.minor) { + foundValidSystemInterface = true; + qnnFunctionPointers->qnnSystemInterface = + systemInterfaceProviders[pIdx]->QNN_SYSTEM_INTERFACE_VER_NAME; + break; + } + } + if (!foundValidSystemInterface) { + QNN_ERROR("Unable to find a valid system interface."); + return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; + } + QNN_FUNCTION_EXIT_LOG; + return StatusCode::SUCCESS; +} \ No newline at end of file diff --git a/SampleApp/src/Utils/DynamicLoadUtil.hpp b/SampleApp/src/Utils/DynamicLoadUtil.hpp new file 
mode 100644 index 0000000000000000000000000000000000000000..7b16d4e60627d995782cfc18603337129e369109 --- /dev/null +++ b/SampleApp/src/Utils/DynamicLoadUtil.hpp @@ -0,0 +1,36 @@ +//============================================================================== +// +// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +#pragma once + +#include "SampleApp.hpp" + +namespace qnn { +namespace tools { +namespace dynamicloadutil { +enum class StatusCode { + SUCCESS, + FAILURE, + FAIL_LOAD_BACKEND, + FAIL_LOAD_MODEL, + FAIL_SYM_FUNCTION, + FAIL_GET_INTERFACE_PROVIDERS, + FAIL_LOAD_SYSTEM_LIB, +}; + +StatusCode getQnnFunctionPointers(std::string backendPath, + std::string modelPath, + sample_app::QnnFunctionPointers* qnnFunctionPointers, + void** backendHandle, + bool loadModelLib, + void** modelHandleRtn); +StatusCode getQnnSystemFunctionPointers(std::string systemLibraryPath, + sample_app::QnnFunctionPointers* qnnFunctionPointers); +} // namespace dynamicloadutil +} // namespace tools +} // namespace qnn diff --git a/SampleApp/src/Utils/IOTensor.cpp b/SampleApp/src/Utils/IOTensor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0c724adbeb5d9e15c97952dc5a63f7048ab77552 --- /dev/null +++ b/SampleApp/src/Utils/IOTensor.cpp @@ -0,0 +1,838 @@ +//============================================================================== +// +// Copyright (c) 2020-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== +#include +#include +#include +#include + +#include "DataUtil.hpp" +#include "IOTensor.hpp" +#include "Logger.hpp" +#ifndef __hexagon__ +#include "PAL/Directory.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" +#endif +#include "PAL/StringOp.hpp" +#include "QnnTypeMacros.hpp" + +using namespace qnn; +using namespace qnn::tools; + +// Helper method to read data from files to a buffer. +iotensor::PopulateInputTensorsRetType_t iotensor::IOTensor::readDataAndAllocateBuffer( + const std::vector& filePaths, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t** bufferToCopy) { + StatusCode returnStatus = StatusCode::SUCCESS; + *bufferToCopy = nullptr; + returnStatus = allocateBuffer(bufferToCopy, dims, dataType); + size_t numFilesPopulated = 0; + size_t batchSize = 0; + datautil::StatusCode status; + std::tie(status, numFilesPopulated, batchSize) = + datautil::readBatchData(filePaths, + filePathsIndexOffset, + loopBackToStart, + dims, + dataType, + reinterpret_cast(*bufferToCopy)); + if (datautil::StatusCode::SUCCESS != status) { + QNN_ERROR("Failure in datautil::readBatchData"); + returnStatus = StatusCode::FAILURE; + } + if (StatusCode::SUCCESS != returnStatus) { + if (nullptr != *bufferToCopy) { + free(*bufferToCopy); + *bufferToCopy = nullptr; + } + } + return {returnStatus, numFilesPopulated, batchSize}; +} + +// Helper method to copy a float buffer, quantize it, and copy +// it to a tensor (Qnn_Tensor_t) buffer. 
+iotensor::StatusCode iotensor::IOTensor::copyFromFloatToNative(float* floatBuffer, + Qnn_Tensor_t* tensor) { + if (nullptr == floatBuffer || nullptr == tensor) { + QNN_ERROR("copyFromFloatToNative(): received a nullptr"); + return StatusCode::FAILURE; + } + + StatusCode returnStatus = StatusCode::SUCCESS; + std::vector dims; + fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); + + switch (QNN_TENSOR_GET_DATA_TYPE(tensor)) { + case QNN_DATATYPE_UFIXED_POINT_8: + datautil::floatToTfN(static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, + datautil::calculateElementCount(dims)); + break; + + case QNN_DATATYPE_UFIXED_POINT_16: + datautil::floatToTfN(static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, + datautil::calculateElementCount(dims)); + break; + + case QNN_DATATYPE_UINT_8: + if (datautil::StatusCode::SUCCESS != + datautil::castFromFloat( + static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + datautil::calculateElementCount(dims))) { + QNN_ERROR("failure in castFromFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_UINT_16: + if (datautil::StatusCode::SUCCESS != + datautil::castFromFloat( + static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + datautil::calculateElementCount(dims))) { + QNN_ERROR("failure in castFromFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_UINT_32: + if (datautil::StatusCode::SUCCESS != + datautil::castFromFloat( + static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + datautil::calculateElementCount(dims))) { + QNN_ERROR("failure in castFromFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case 
QNN_DATATYPE_INT_8: + if (datautil::StatusCode::SUCCESS != + datautil::castFromFloat( + static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + datautil::calculateElementCount(dims))) { + QNN_ERROR("failure in castFromFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_INT_16: + if (datautil::StatusCode::SUCCESS != + datautil::castFromFloat( + static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + datautil::calculateElementCount(dims))) { + QNN_ERROR("failure in castFromFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_INT_32: + if (datautil::StatusCode::SUCCESS != + datautil::castFromFloat( + static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + datautil::calculateElementCount(dims))) { + QNN_ERROR("failure in castFromFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_BOOL_8: + if (datautil::StatusCode::SUCCESS != + datautil::castFromFloat( + static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + floatBuffer, + datautil::calculateElementCount(dims))) { + QNN_ERROR("failure in castFromFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + default: + QNN_ERROR("Datatype not supported yet!"); + returnStatus = StatusCode::FAILURE; + break; + } + return returnStatus; +} + +// Helper method to populate an input tensor in the graph during execution. +// It relies on reading data from files provided during app creation. 
+iotensor::PopulateInputTensorsRetType_t iotensor::IOTensor::populateInputTensor( + const std::vector& filePaths, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + Qnn_Tensor_t* input, + iotensor::InputDataType inputDataType) { + if (nullptr == input) { + QNN_ERROR("input is nullptr"); + return {StatusCode::FAILURE, 0, 0}; + } + + auto returnStatus = StatusCode::SUCCESS; + size_t numFilesPopulated = 0; + size_t batchSize = 0; + std::vector dims; + fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(input), QNN_TENSOR_GET_RANK(input)); + + if (inputDataType == InputDataType::FLOAT && + QNN_TENSOR_GET_DATA_TYPE(input) != QNN_DATATYPE_FLOAT_32) { + uint8_t* fileToBuffer = nullptr; + std::tie(returnStatus, numFilesPopulated, batchSize) = + readDataAndAllocateBuffer(filePaths, + filePathsIndexOffset, + loopBackToStart, + dims, + QNN_DATATYPE_FLOAT_32, + &fileToBuffer); + if (StatusCode::SUCCESS == returnStatus) { + QNN_DEBUG("readDataFromFileToBuffer successful"); + returnStatus = copyFromFloatToNative(reinterpret_cast(fileToBuffer), input); + } + if (nullptr != fileToBuffer) { + free(fileToBuffer); + fileToBuffer = nullptr; + } + } else { + datautil::StatusCode status; + std::tie(status, numFilesPopulated, batchSize) = + datautil::readBatchData(filePaths, + filePathsIndexOffset, + loopBackToStart, + dims, + QNN_TENSOR_GET_DATA_TYPE(input), + static_cast(QNN_TENSOR_GET_CLIENT_BUF(input).data)); + if (datautil::StatusCode::SUCCESS != status) { + QNN_ERROR("Failure in datautil::readBatchData"); + returnStatus = StatusCode::FAILURE; + } + } + return {returnStatus, numFilesPopulated, batchSize}; +} + +// Helper method to populate all input tensors during execution. 
+iotensor::PopulateInputTensorsRetType_t iotensor::IOTensor::populateInputTensors( + uint32_t graphIdx, + const std::vector>& filePathsVector, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + const std::unordered_map& inputNameToIndex, + Qnn_Tensor_t* inputs, + qnn_wrapper_api::GraphInfo_t graphInfo, + iotensor::InputDataType inputDataType) { + QNN_DEBUG("populateInputTensors() graphIndx %d", graphIdx); + if (nullptr == inputs) { + QNN_ERROR("inputs is nullptr"); + return {StatusCode::FAILURE, 0, 0}; + } + auto inputCount = graphInfo.numInputTensors; + if (filePathsVector.size() != inputCount) { + QNN_ERROR( + "Incorrect amount of Input files for graphIdx: %d. Expected: %d, " + "received: %d", + graphIdx, + inputCount, + filePathsVector.size()); + return {StatusCode::FAILURE, 0, 0}; + } + size_t numFilesPopulated = 0; + size_t numBatchSize = 0; + for (size_t inputIdx = 0; inputIdx < inputCount; inputIdx++) { + size_t inputNameIdx = inputIdx; + QNN_DEBUG("index = %d input column index = %d", inputIdx, inputNameIdx); + std::string inputNodeName; + if (QNN_TENSOR_GET_NAME(graphInfo.inputTensors[inputIdx])) + inputNodeName = QNN_TENSOR_GET_NAME(graphInfo.inputTensors[inputIdx]); + if (!inputNodeName.empty() && inputNameToIndex.find(inputNodeName) != inputNameToIndex.end()) { + inputNameIdx = inputNameToIndex.at(inputNodeName); + } + StatusCode returnStatus; + size_t currentInputNumFilesPopulated = 0; + size_t currentInputNumBatchSize = 0; + std::tie(returnStatus, currentInputNumFilesPopulated, currentInputNumBatchSize) = + populateInputTensor(filePathsVector[inputNameIdx], + filePathsIndexOffset, + loopBackToStart, + &(inputs[inputIdx]), + inputDataType); + if (StatusCode::SUCCESS != returnStatus) { + QNN_ERROR("populateInputTensorFromFiles failed for input %s with index %d", + inputNodeName.c_str(), + inputIdx); + return {StatusCode::FAILURE, currentInputNumFilesPopulated, currentInputNumBatchSize}; + } + if (inputIdx == 0) { + numFilesPopulated = 
currentInputNumFilesPopulated; + numBatchSize = currentInputNumBatchSize; + } else { + if (numFilesPopulated != currentInputNumFilesPopulated || + numBatchSize != currentInputNumBatchSize) { + QNN_ERROR( + "Current input tensor with name: %s with index %d files populated = %d, batch size = %d" + " does not match with expected files populated = %d, batch size = %d", + inputNodeName.c_str(), + inputIdx, + currentInputNumFilesPopulated, + currentInputNumBatchSize, + numFilesPopulated, + numBatchSize); + return {StatusCode::FAILURE, numFilesPopulated, numBatchSize}; + } + } + } + return {StatusCode::SUCCESS, numFilesPopulated, numBatchSize}; +} + +// Setup details for Qnn_Tensor_t for execution +// based on information in Qnn_TensorWrapper_t provided by model.so. +iotensor::StatusCode iotensor::IOTensor::setupTensors(Qnn_Tensor_t** tensors, + uint32_t tensorCount, + Qnn_Tensor_t* tensorWrappers) { + if (nullptr == tensorWrappers) { + QNN_ERROR("tensorWrappers is nullptr"); + return StatusCode::FAILURE; + } + if (0 == tensorCount) { + QNN_INFO("tensor count is 0. Nothing to setup."); + return StatusCode::SUCCESS; + } + auto returnStatus = StatusCode::SUCCESS; + *tensors = (Qnn_Tensor_t*)calloc(1, tensorCount * sizeof(Qnn_Tensor_t)); + if (nullptr == *tensors) { + QNN_ERROR("mem alloc failed for *tensors"); + returnStatus = StatusCode::FAILURE; + return returnStatus; + } + for (size_t tensorIdx = 0; tensorIdx < tensorCount; tensorIdx++) { + Qnn_Tensor_t wrapperTensor = tensorWrappers[tensorIdx]; + std::vector dims; + fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(wrapperTensor), QNN_TENSOR_GET_RANK(wrapperTensor)); + if (StatusCode::SUCCESS == returnStatus) { + QNN_DEBUG("allocateBuffer successful"); + (*tensors)[tensorIdx] = QNN_TENSOR_INIT; + returnStatus = + (sample_app::deepCopyQnnTensorInfo(((*tensors) + tensorIdx), &wrapperTensor) == true + ? 
StatusCode::SUCCESS + : StatusCode::FAILURE); + } + if (StatusCode::SUCCESS == returnStatus) { + QNN_DEBUG("deepCopyQnnTensorInfo successful"); + QNN_TENSOR_SET_MEM_TYPE(((*tensors) + tensorIdx), QNN_TENSORMEMTYPE_RAW); + } + Qnn_ClientBuffer_t clientBuffer = QNN_CLIENT_BUFFER_INIT; + returnStatus = allocateBuffer(reinterpret_cast(&clientBuffer.data), + dims, + QNN_TENSOR_GET_DATA_TYPE((*tensors) + tensorIdx)); + datautil::StatusCode datautilStatus{datautil::StatusCode::SUCCESS}; + size_t length{0}; + std::tie(datautilStatus, length) = + datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE((*tensors) + tensorIdx)); + if (datautilStatus != datautil::StatusCode::SUCCESS) { + returnStatus = StatusCode::FAILURE; + } + clientBuffer.dataSize = length; + QNN_TENSOR_SET_CLIENT_BUF(((*tensors) + tensorIdx), clientBuffer); + if (StatusCode::SUCCESS != returnStatus) { + QNN_ERROR("Failure in setupTensors, cleaning up resources"); + if (nullptr != (QNN_TENSOR_GET_CLIENT_BUF((*tensors) + tensorIdx)).data) { + free(QNN_TENSOR_GET_CLIENT_BUF((*tensors) + tensorIdx).data); + } + tearDownTensors(*tensors, tensorIdx); + *tensors = nullptr; + returnStatus = StatusCode::FAILURE; + QNN_ERROR("Failure in setupTensors, done cleaning up resources"); + return returnStatus; + } + } + return returnStatus; +} + +// Setup details for all input and output tensors for graph execution. 
+iotensor::StatusCode iotensor::IOTensor::setupInputAndOutputTensors( + Qnn_Tensor_t** inputs, Qnn_Tensor_t** outputs, qnn_wrapper_api::GraphInfo_t graphInfo) { + auto returnStatus = StatusCode::SUCCESS; + if (StatusCode::SUCCESS != + setupTensors(inputs, graphInfo.numInputTensors, (graphInfo.inputTensors))) { + QNN_ERROR("Failure in setting up input tensors"); + returnStatus = StatusCode::FAILURE; + } + if (StatusCode::SUCCESS != + setupTensors(outputs, graphInfo.numOutputTensors, (graphInfo.outputTensors))) { + QNN_ERROR("Failure in setting up output tensors"); + returnStatus = StatusCode::FAILURE; + } + if (StatusCode::SUCCESS != returnStatus) { + QNN_ERROR("Failure in setupInputAndOutputTensors, cleaning up resources"); + if (nullptr != *inputs) { + QNN_DEBUG("cleaning up input tensors"); + tearDownTensors(*inputs, graphInfo.numInputTensors); + *inputs = nullptr; + } + if (nullptr != *outputs) { + QNN_DEBUG("cleaning up output tensors"); + tearDownTensors(*outputs, graphInfo.numOutputTensors); + *outputs = nullptr; + } + QNN_ERROR("Failure in setupInputAndOutputTensors, done cleaning up resources"); + } + return returnStatus; +} + +// Clean up all tensors related data after execution. +iotensor::StatusCode iotensor::IOTensor::tearDownTensors(Qnn_Tensor_t* tensors, + uint32_t tensorCount) { + for (size_t tensorIdx = 0; tensorIdx < tensorCount; tensorIdx++) { + QNN_DEBUG("freeing resources for tensor: %d", tensorIdx); + if (nullptr != QNN_TENSOR_GET_DIMENSIONS(tensors[tensorIdx])) { + QNN_DEBUG("freeing dimensions"); + free(QNN_TENSOR_GET_DIMENSIONS(tensors[tensorIdx])); + } + if (nullptr != QNN_TENSOR_GET_CLIENT_BUF(tensors[tensorIdx]).data) { + QNN_DEBUG("freeing clientBuf.data"); + free(QNN_TENSOR_GET_CLIENT_BUF(tensors[tensorIdx]).data); + } + } + free(tensors); + return StatusCode::SUCCESS; +} + +// Clean up all input and output tensors after execution. 
+iotensor::StatusCode iotensor::IOTensor::tearDownInputAndOutputTensors(Qnn_Tensor_t* inputs, + Qnn_Tensor_t* outputs, + size_t numInputTensors, + size_t numOutputTensors) { + if (nullptr != inputs) { + QNN_INFO("cleaning up resources for input tensors"); + tearDownTensors(inputs, numInputTensors); + inputs = nullptr; + } + if (nullptr != outputs) { + QNN_INFO("cleaning up resources for output tensors"); + tearDownTensors(outputs, numOutputTensors); + outputs = nullptr; + } + return StatusCode::SUCCESS; +} + +// Helper method to allocate a buffer. +iotensor::StatusCode iotensor::IOTensor::allocateBuffer(uint8_t** buffer, + std::vector dims, + Qnn_DataType_t dataType) { + size_t elementCount = datautil::calculateElementCount(dims); + auto returnStatus = StatusCode::SUCCESS; + switch (dataType) { + case QNN_DATATYPE_FLOAT_32: + QNN_DEBUG("allocating float buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + case QNN_DATATYPE_UINT_8: + case QNN_DATATYPE_UFIXED_POINT_8: + QNN_DEBUG("allocating uint8_t buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + case QNN_DATATYPE_UINT_16: + case QNN_DATATYPE_UFIXED_POINT_16: + QNN_DEBUG("allocating uint16_t buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + case QNN_DATATYPE_UINT_32: + QNN_DEBUG("allocating uint32_t buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + case QNN_DATATYPE_INT_8: + QNN_DEBUG("allocating int8_t buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + case QNN_DATATYPE_INT_16: + QNN_DEBUG("allocating int16_t buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + case QNN_DATATYPE_INT_32: + QNN_DEBUG("allocating int32_t buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + case QNN_DATATYPE_BOOL_8: + 
QNN_DEBUG("allocating bool buffer"); + returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); + break; + + default: + QNN_ERROR("Datatype not supported yet!"); + returnStatus = StatusCode::FAILURE; + break; + } + return returnStatus; +} + +// Helper method to allocate a buffer. +template +iotensor::StatusCode iotensor::IOTensor::allocateBuffer(T** buffer, size_t& elementCount) { + QNN_DEBUG("ElementCount: %d, sizeof(T): %d, total size: %d", + elementCount, + sizeof(T), + elementCount * sizeof(T)); + *buffer = (T*)malloc(elementCount * sizeof(T)); + if (nullptr == *buffer) { + QNN_ERROR("mem alloc failed for *buffer"); + return StatusCode::FAILURE; + } + return StatusCode::SUCCESS; +} + +// Convert data to float or de-quantization. This is used when +// user requests for float output and the model produces +// non-float output. +#ifndef __hexagon__ +iotensor::StatusCode iotensor::IOTensor::convertToFloat(float** out, Qnn_Tensor_t* tensor) { + if (nullptr == tensor) { + QNN_ERROR("tensors is nullptr"); + return StatusCode::FAILURE; + } + std::vector dims; + fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); + auto returnStatus = StatusCode::SUCCESS; + size_t elementCount = datautil::calculateElementCount(dims); + returnStatus = allocateBuffer(out, elementCount); + if (StatusCode::SUCCESS != returnStatus) { + QNN_ERROR("failure in allocateBuffer"); + return returnStatus; + } + switch (QNN_TENSOR_GET_DATA_TYPE(tensor)) { + case QNN_DATATYPE_UFIXED_POINT_8: + if (datautil::StatusCode::SUCCESS != + datautil::tfNToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, + elementCount)) { + QNN_ERROR("failure in tfNToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_UFIXED_POINT_16: + if (datautil::StatusCode::SUCCESS != + datautil::tfNToFloat( + 
*out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, + QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, + elementCount)) { + QNN_ERROR("failure in tfNToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_UINT_8: + if (datautil::StatusCode::SUCCESS != + datautil::castToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + elementCount)) { + QNN_ERROR("failure in castToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_UINT_16: + if (datautil::StatusCode::SUCCESS != + datautil::castToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + elementCount)) { + QNN_ERROR("failure in castToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_UINT_32: + if (datautil::StatusCode::SUCCESS != + datautil::castToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + elementCount)) { + QNN_ERROR("failure in castToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_INT_8: + if (datautil::StatusCode::SUCCESS != + datautil::castToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + elementCount)) { + QNN_ERROR("failure in castToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_INT_16: + if (datautil::StatusCode::SUCCESS != + datautil::castToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + elementCount)) { + QNN_ERROR("failure in castToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_INT_32: + if (datautil::StatusCode::SUCCESS != + datautil::castToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + elementCount)) { + QNN_ERROR("failure in castToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + case QNN_DATATYPE_BOOL_8: + if (datautil::StatusCode::SUCCESS 
!= + datautil::castToFloat( + *out, + reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), + elementCount)) { + QNN_ERROR("failure in castToFloat"); + returnStatus = StatusCode::FAILURE; + } + break; + + default: + QNN_ERROR("Datatype not supported yet!"); + returnStatus = StatusCode::FAILURE; + break; + } + if (StatusCode::SUCCESS != returnStatus) { + QNN_DEBUG("freeing *out"); + if (*out != nullptr) { + free(*out); + *out = nullptr; + } + } + return returnStatus; +} + +// Helper method to convert Output tensors to float and write them +// out to files. +iotensor::StatusCode iotensor::IOTensor::convertAndWriteOutputTensorInFloat( + Qnn_Tensor_t* output, + std::vector outputPaths, + std::string fileName, + size_t outputBatchSize) { + if (nullptr == output) { + QNN_ERROR("output is nullptr"); + return StatusCode::FAILURE; + } + + auto returnStatus = StatusCode::SUCCESS; + std::vector dims; + fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(output), QNN_TENSOR_GET_RANK(output)); + float* floatBuffer = nullptr; + returnStatus = convertToFloat(&floatBuffer, output); + if (StatusCode::SUCCESS != returnStatus) { + QNN_ERROR("failure in convertToFloat"); + return StatusCode::FAILURE; + } + uint8_t* bufferToWrite = reinterpret_cast(floatBuffer); + if (datautil::StatusCode::SUCCESS != + datautil::writeBatchDataToFile( + outputPaths, fileName, dims, QNN_DATATYPE_FLOAT_32, bufferToWrite, outputBatchSize)) { + QNN_ERROR("failure in writeBatchDataToFile"); + returnStatus = StatusCode::FAILURE; + } + if (nullptr != floatBuffer) { + QNN_DEBUG("freeing floatBuffer"); + free(floatBuffer); + floatBuffer = nullptr; + } + return returnStatus; +} + +// Helper method to write out output. There is no de-quantization here. +// Just write output as is to files. 
+iotensor::StatusCode iotensor::IOTensor::writeOutputTensor(Qnn_Tensor_t* output, + std::vector outputPaths, + std::string fileName, + size_t outputBatchSize) { + if (nullptr == output) { + QNN_ERROR("output is nullptr"); + return StatusCode::FAILURE; + } + auto returnStatus = StatusCode::SUCCESS; + std::vector dims; + fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(output), QNN_TENSOR_GET_RANK(output)); + uint8_t* bufferToWrite = reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(output).data); + if (datautil::StatusCode::SUCCESS != + datautil::writeBatchDataToFile(outputPaths, + fileName, + dims, + QNN_TENSOR_GET_DATA_TYPE(output), + bufferToWrite, + outputBatchSize)) { + QNN_ERROR("failure in writeBatchDataToFile"); + returnStatus = StatusCode::FAILURE; + } + return returnStatus; +} + +// Write out all output tensors to files. If output_data_type is float, +// then all outputs will be raw floats regardless of what the model outputs. +// If the output_data_type is native, then output is written as produced by the model. +// Also, for native option, a json with quantization parameters is written out. +// If output_data_type is float_and_native, both above are done. +// If the output in the graph is float, then output_data_type has no effect. 
+iotensor::StatusCode iotensor::IOTensor::writeOutputTensors(uint32_t graphIdx, + size_t startIdx, + char* graphName, + Qnn_Tensor_t* outputs, + uint32_t numOutputs, + iotensor::OutputDataType outputDatatype, + uint32_t graphsCount, + std::string outputPath, + size_t numInputFilesPopulated, + size_t outputBatchSize) { + if (nullptr == outputs) { + QNN_ERROR("Received nullptr"); + return StatusCode::FAILURE; + } + if (graphsCount > 1) { + if (nullptr != graphName && strlen(graphName) > 0) { + outputPath += (pal::Path::getSeparator() + std::string(graphName)); + } else { + outputPath += (pal::Path::getSeparator() + std::string("Graph_") + std::to_string(graphIdx)); + } + } + auto returnStatus = StatusCode::SUCCESS; + std::vector outputPaths; + for (size_t idx = 0; idx < numInputFilesPopulated; idx++) { + std::string output = outputPath + (pal::Path::getSeparator() + std::string("Result_") + + std::to_string(startIdx + idx)); + outputPaths.push_back(output); + } + for (size_t outputIdx = 0; outputIdx < numOutputs; outputIdx++) { + QNN_DEBUG("Writing output for outputIdx: %d", outputIdx); + std::string outputFilePrefix; + if (nullptr != QNN_TENSOR_GET_NAME(outputs[outputIdx]) && + strlen(QNN_TENSOR_GET_NAME(outputs[outputIdx])) > 0) { + outputFilePrefix = std::string(QNN_TENSOR_GET_NAME(outputs[outputIdx])); + } else { + outputFilePrefix = std::string("Output_") + std::to_string(outputIdx); + } + auto outputFile = outputFilePrefix + std::string(".raw"); + auto outputFileNative = outputFilePrefix + std::string("_native.raw"); + if (QNN_TENSOR_GET_DATA_TYPE(outputs[outputIdx]) == QNN_DATATYPE_FLOAT_32) { + QNN_DEBUG("Writing in output->dataType == QNN_DATATYPE_FLOAT_32"); + returnStatus = + writeOutputTensor(&(outputs[outputIdx]), outputPaths, outputFile, outputBatchSize); + } else if (outputDatatype == OutputDataType::FLOAT_ONLY) { + QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_ONLY"); + returnStatus = convertAndWriteOutputTensorInFloat( + 
&(outputs[outputIdx]), outputPaths, outputFile, outputBatchSize); + } else if (outputDatatype == OutputDataType::NATIVE_ONLY) { + QNN_DEBUG("Writing in output->dataType == OutputDataType::NATIVE_ONLY"); + returnStatus = + writeOutputTensor(&(outputs[outputIdx]), outputPaths, outputFileNative, outputBatchSize); + } else if (outputDatatype == OutputDataType::FLOAT_AND_NATIVE) { + QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_AND_NATIVE"); + returnStatus = convertAndWriteOutputTensorInFloat( + &(outputs[outputIdx]), outputPaths, outputFile, outputBatchSize); + if (StatusCode::SUCCESS == returnStatus) { + returnStatus = writeOutputTensor( + &(outputs[outputIdx]), outputPaths, outputFileNative, outputBatchSize); + } + } + } + return returnStatus; +} +#endif + +// Helper method to allocate a buffer and copy data to it. +iotensor::StatusCode iotensor::IOTensor::allocateAndCopyBuffer(uint8_t** buffer, + Qnn_Tensor_t* tensor) { + if (nullptr == tensor) { + return StatusCode::FAILURE; + } + std::vector dims; + fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); + datautil::StatusCode datautilStatus; + size_t length; + std::tie(datautilStatus, length) = + datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE(tensor)); + if (datautilStatus != datautil::StatusCode::SUCCESS) { + return StatusCode::FAILURE; + } + if (StatusCode::SUCCESS != allocateBuffer(buffer, dims, QNN_TENSOR_GET_DATA_TYPE(tensor))) { + QNN_ERROR("failure in allocateBuffer"); + return StatusCode::FAILURE; + } + pal::StringOp::memscpy(*buffer, + length * sizeof(uint8_t), + QNN_TENSOR_GET_CLIENT_BUF(tensor).data, + length * sizeof(uint8_t)); + return StatusCode::SUCCESS; +} + +iotensor::StatusCode iotensor::IOTensor::fillDims(std::vector& dims, + uint32_t* inDimensions, + uint32_t rank) { + if (nullptr == inDimensions) { + QNN_ERROR("input dimensions is nullptr"); + return StatusCode::FAILURE; + } + for (size_t r = 0; r < rank; r++) { + 
dims.push_back(inDimensions[r]); + } + return StatusCode::SUCCESS; +} + +iotensor::OutputDataType iotensor::parseOutputDataType(std::string dataTypeString) { + std::transform(dataTypeString.begin(), dataTypeString.end(), dataTypeString.begin(), ::tolower); + OutputDataType parsedDataType = OutputDataType::INVALID; + if (dataTypeString == "float_only") { + parsedDataType = OutputDataType::FLOAT_ONLY; + } else if (dataTypeString == "native_only") { + parsedDataType = OutputDataType::NATIVE_ONLY; + } else if (dataTypeString == "float_and_native") { + parsedDataType = OutputDataType::FLOAT_AND_NATIVE; + } + return parsedDataType; +} + +iotensor::InputDataType iotensor::parseInputDataType(std::string dataTypeString) { + std::transform(dataTypeString.begin(), dataTypeString.end(), dataTypeString.begin(), ::tolower); + InputDataType parsedDataType = InputDataType::INVALID; + if (dataTypeString == "float") { + parsedDataType = InputDataType::FLOAT; + } else if (dataTypeString == "native") { + parsedDataType = InputDataType::NATIVE; + } + return parsedDataType; +} \ No newline at end of file diff --git a/SampleApp/src/Utils/IOTensor.hpp b/SampleApp/src/Utils/IOTensor.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a44086fcf0bd45e3d171961ddb77875f1b841c82 --- /dev/null +++ b/SampleApp/src/Utils/IOTensor.hpp @@ -0,0 +1,115 @@ +//============================================================================== +// +// Copyright (c) 2020, 2022-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== +#pragma once + +#include +#include + +#include "QnnBackend.h" +#include "QnnCommon.h" +#include "QnnContext.h" +#include "QnnGraph.h" +#include "QnnProperty.h" +#include "QnnSampleAppUtils.hpp" +#include "QnnTensor.h" +#include "QnnTypes.h" +#include "QnnWrapperUtils.hpp" + +namespace qnn { +namespace tools { +namespace iotensor { + +enum class StatusCode { SUCCESS, FAILURE }; +enum class OutputDataType { FLOAT_ONLY, NATIVE_ONLY, FLOAT_AND_NATIVE, INVALID }; +enum class InputDataType { FLOAT, NATIVE, INVALID }; + +OutputDataType parseOutputDataType(std::string dataTypeString); +InputDataType parseInputDataType(std::string dataTypeString); + +using PopulateInputTensorsRetType_t = std::tuple; + +class IOTensor { + public: + StatusCode setupInputAndOutputTensors(Qnn_Tensor_t **inputs, + Qnn_Tensor_t **outputs, + qnn_wrapper_api::GraphInfo_t graphInfo); + +#ifndef __hexagon__ + StatusCode writeOutputTensors(uint32_t graphIdx, + size_t startIdx, + char *graphName, + Qnn_Tensor_t *outputs, + uint32_t numOutputs, + OutputDataType outputDatatype, + uint32_t graphsCount, + std::string outputPath, + size_t numInputFilesPopulated, + size_t outputBatchSize); +#endif + + PopulateInputTensorsRetType_t populateInputTensors( + uint32_t graphIdx, + const std::vector> &filePathsVector, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + const std::unordered_map &inputNameToIndex, + Qnn_Tensor_t *inputs, + qnn_wrapper_api::GraphInfo_t graphInfo, + iotensor::InputDataType inputDataType); + + StatusCode tearDownInputAndOutputTensors(Qnn_Tensor_t *inputs, + Qnn_Tensor_t *outputs, + size_t numInputTensors, + size_t numOutputTensors); + + private: + PopulateInputTensorsRetType_t populateInputTensor(const std::vector &filePaths, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + Qnn_Tensor_t *input, + InputDataType inputDataType); + + PopulateInputTensorsRetType_t 
readDataAndAllocateBuffer(const std::vector &filePaths, + const size_t filePathsIndexOffset, + const bool loopBackToStart, + std::vector dims, + Qnn_DataType_t dataType, + uint8_t **bufferToCopy); + + template + StatusCode allocateBuffer(T **buffer, size_t &elementCount); + +#ifndef __hexagon__ + StatusCode convertToFloat(float **out, Qnn_Tensor_t *output); + + StatusCode convertAndWriteOutputTensorInFloat(Qnn_Tensor_t *output, + std::vector outputPaths, + std::string fileName, + size_t outputBatchSize); + + StatusCode writeOutputTensor(Qnn_Tensor_t *output, + std::vector outputPaths, + std::string fileName, + size_t outputBatchSize); +#endif + + StatusCode allocateAndCopyBuffer(uint8_t **buffer, Qnn_Tensor_t *tensor); + + StatusCode tearDownTensors(Qnn_Tensor_t *tensors, uint32_t tensorCount); + + StatusCode allocateBuffer(uint8_t **buffer, std::vector dims, Qnn_DataType_t dataType); + + StatusCode copyFromFloatToNative(float *floatBuffer, Qnn_Tensor_t *tensor); + + StatusCode setupTensors(Qnn_Tensor_t **tensors, uint32_t tensorCount, Qnn_Tensor_t *tensorsInfo); + + StatusCode fillDims(std::vector &dims, uint32_t *inDimensions, uint32_t rank); +}; +} // namespace iotensor +} // namespace tools +} // namespace qnn \ No newline at end of file diff --git a/SampleApp/src/Utils/QnnSampleAppUtils.cpp b/SampleApp/src/Utils/QnnSampleAppUtils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..53b409f7b32eaa09f396279aa1f10bddbd580671 --- /dev/null +++ b/SampleApp/src/Utils/QnnSampleAppUtils.cpp @@ -0,0 +1,394 @@ +//============================================================================== +// +// Copyright (c) 2019-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include + +#include "Logger.hpp" +#ifndef __hexagon__ +#include "PAL/Directory.hpp" +#include "PAL/FileOp.hpp" +#include "PAL/Path.hpp" +#endif +#include "PAL/StringOp.hpp" +#include "QnnSampleAppUtils.hpp" +#include "QnnTypeMacros.hpp" + +using namespace qnn; +using namespace qnn::tools; + +void sample_app::split(std::vector &splitString, + const std::string &tokenizedString, + const char separator) { + splitString.clear(); + std::istringstream tokenizedStringStream(tokenizedString); + while (!tokenizedStringStream.eof()) { + std::string value; + getline(tokenizedStringStream, value, separator); + if (!value.empty()) { + splitString.push_back(value); + } + } +} + +void sample_app::parseInputFilePaths(std::vector &inputFilePaths, + std::vector &paths, + std::string separator) { + for (auto &inputInfo : inputFilePaths) { + auto position = inputInfo.find(separator); + if (position != std::string::npos) { + auto path = inputInfo.substr(position + separator.size()); + paths.push_back(path); + } else { + paths.push_back(inputInfo); + } + } +} + +sample_app::ReadInputListsRetType_t sample_app::readInputLists( + std::vector inputFileListPaths) { + std::vector>> filePathsLists; + std::vector> inputNameToIndexMaps; + for (auto const &path : inputFileListPaths) { + bool readSuccess; + std::vector> filePathList; + std::unordered_map inputNameToIndex; + std::tie(filePathList, inputNameToIndex, readSuccess) = readInputList(path); + if (!readSuccess) { + filePathsLists.clear(); + return std::make_tuple(filePathsLists, inputNameToIndexMaps, false); + } + filePathsLists.push_back(filePathList); + inputNameToIndexMaps.push_back(inputNameToIndex); + } + return std::make_tuple(filePathsLists, inputNameToIndexMaps, true); +} + +sample_app::ReadInputListRetType_t sample_app::readInputList(const std::string inputFileListPath) { + 
std::queue lines; + std::ifstream fileListStream(inputFileListPath); + if (!fileListStream) { + QNN_ERROR("Failed to open input file: %s", inputFileListPath.c_str()); + return std::make_tuple(std::vector>{}, + std::unordered_map{}, + false); + } + + std::string fileLine; + while (std::getline(fileListStream, fileLine)) { + if (fileLine.empty()) continue; + lines.push(fileLine); + } + + if (!lines.empty() && lines.front().compare(0, 1, "#") == 0) { + lines.pop(); + } + + if (!lines.empty() && lines.front().compare(0, 1, "%") == 0) { + lines.pop(); + } + + std::string separator = ":="; + std::vector> filePathsList; + std::unordered_map inputNameToIndex; + if (!lines.empty()) { + inputNameToIndex = extractInputNameIndices(lines.front(), separator); + } + while (!lines.empty()) { + std::vector paths{}; + std::vector inputFilePaths; + split(inputFilePaths, lines.front(), ' '); + parseInputFilePaths(inputFilePaths, paths, separator); + filePathsList.reserve(paths.size()); + for (size_t idx = 0; idx < paths.size(); idx++) { + if (idx >= filePathsList.size()) { + filePathsList.push_back(std::vector()); + } + filePathsList[idx].push_back(paths[idx]); + } + lines.pop(); + } + return std::make_tuple(filePathsList, inputNameToIndex, true); +} + +std::unordered_map sample_app::extractInputNameIndices( + const std::string &inputLine, const std::string &separator) { + std::vector inputFilePaths; + std::unordered_map inputNameToIndex; + split(inputFilePaths, inputLine, ' '); + size_t inputCount = 0; + for (uint32_t idx = 0; idx < inputFilePaths.size(); idx++) { + auto position = inputFilePaths[idx].find(separator); + if (position != std::string::npos) { + auto unsanitizedTensorName = inputFilePaths[idx].substr(0, position); + auto sanitizedTensorName = sanitizeTensorName(unsanitizedTensorName); + if (sanitizedTensorName != unsanitizedTensorName) { + inputNameToIndex[unsanitizedTensorName] = idx; + } + inputNameToIndex[sanitizedTensorName] = idx; + inputCount = inputCount + 1; + } 
+ } + return inputCount == inputFilePaths.size() ? inputNameToIndex + : std::unordered_map{}; +} + +std::string sample_app::sanitizeTensorName(std::string name) { + std::string sanitizedName = std::regex_replace(name, std::regex("\\W+"), "_"); + if (!std::isalpha(sanitizedName[0]) && sanitizedName[0] != '_') { + sanitizedName = "_" + sanitizedName; + } + return sanitizedName; +} + +sample_app::ProfilingLevel sample_app::parseProfilingLevel(std::string profilingLevelString) { + std::transform(profilingLevelString.begin(), + profilingLevelString.end(), + profilingLevelString.begin(), + ::tolower); + ProfilingLevel parsedProfilingLevel = ProfilingLevel::INVALID; + if (profilingLevelString == "off") { + parsedProfilingLevel = ProfilingLevel::OFF; + } else if (profilingLevelString == "basic") { + parsedProfilingLevel = ProfilingLevel::BASIC; + } else if (profilingLevelString == "detailed") { + parsedProfilingLevel = ProfilingLevel::DETAILED; + } + return parsedProfilingLevel; +} + +bool sample_app::deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t *src) { + if (nullptr == dst || nullptr == src) { + QNN_ERROR("Received nullptr"); + return false; + } + // set tensor.version before using QNN_TENSOR_SET macros, as they require the version to be set + // to correctly assign values + dst->version = src->version; + const char *tensorName = QNN_TENSOR_GET_NAME(src); + if (!tensorName) { + QNN_TENSOR_SET_NAME(dst, nullptr); + } else { + QNN_TENSOR_SET_NAME(dst, pal::StringOp::strndup(tensorName, strlen(tensorName))); + } + QNN_TENSOR_SET_ID(dst, QNN_TENSOR_GET_ID(src)); + QNN_TENSOR_SET_TYPE(dst, QNN_TENSOR_GET_TYPE(src)); + QNN_TENSOR_SET_DATA_FORMAT(dst, QNN_TENSOR_GET_DATA_FORMAT(src)); + QNN_TENSOR_SET_DATA_TYPE(dst, QNN_TENSOR_GET_DATA_TYPE(src)); + Qnn_QuantizeParams_t qParams = QNN_QUANTIZE_PARAMS_INIT; + qParams.encodingDefinition = QNN_TENSOR_GET_QUANT_PARAMS(src).encodingDefinition; + qParams.quantizationEncoding = QNN_QUANTIZATION_ENCODING_UNDEFINED; + if 
(QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET) { + qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; + qParams.scaleOffsetEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).scaleOffsetEncoding; + } else if (QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == + QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) { + qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; + qParams.axisScaleOffsetEncoding.axis = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.axis; + qParams.axisScaleOffsetEncoding.numScaleOffsets = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; + if (QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets > 0) { + qParams.axisScaleOffsetEncoding.scaleOffset = (Qnn_ScaleOffset_t *)malloc( + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets * + sizeof(Qnn_ScaleOffset_t)); + if (qParams.axisScaleOffsetEncoding.scaleOffset) { + for (size_t idx = 0; + idx < QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; + idx++) { + qParams.axisScaleOffsetEncoding.scaleOffset[idx].scale = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].scale; + qParams.axisScaleOffsetEncoding.scaleOffset[idx].offset = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].offset; + } + } + } + } + QNN_TENSOR_SET_QUANT_PARAMS(dst, qParams); + QNN_TENSOR_SET_RANK(dst, QNN_TENSOR_GET_RANK(src)); + QNN_TENSOR_SET_DIMENSIONS(dst, nullptr); + if (QNN_TENSOR_GET_RANK(src) > 0) { + QNN_TENSOR_SET_DIMENSIONS(dst, (uint32_t *)malloc(QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t))); + if (QNN_TENSOR_GET_DIMENSIONS(dst)) { + pal::StringOp::memscpy(QNN_TENSOR_GET_DIMENSIONS(dst), + QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t), + QNN_TENSOR_GET_DIMENSIONS(src), + QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t)); + } + if 
(QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(src)) { + QNN_TENSOR_SET_IS_DYNAMIC_DIMENSIONS( + dst, (uint8_t *)malloc(QNN_TENSOR_GET_RANK(src) * sizeof(uint8_t))); + pal::StringOp::memscpy(QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(dst), + QNN_TENSOR_GET_RANK(src) * sizeof(uint8_t), + QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(src), + QNN_TENSOR_GET_RANK(src) * sizeof(uint8_t)); + } + } + QNN_TENSOR_SET_SPARSE_PARAMS(dst, QNN_TENSOR_GET_SPARSE_PARAMS(src)); + return true; +} + +bool sample_app::copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, + Qnn_Tensor_t *&tensorWrappers, + uint32_t tensorsCount) { + QNN_FUNCTION_ENTRY_LOG; + auto returnStatus = true; + tensorWrappers = (Qnn_Tensor_t *)calloc(tensorsCount, sizeof(Qnn_Tensor_t)); + if (nullptr == tensorWrappers) { + QNN_ERROR("Failed to allocate memory for tensorWrappers."); + return false; + } + if (returnStatus) { + for (size_t tIdx = 0; tIdx < tensorsCount; tIdx++) { + QNN_DEBUG("Extracting tensorInfo for tensor Idx: %d", tIdx); + tensorWrappers[tIdx] = QNN_TENSOR_INIT; + deepCopyQnnTensorInfo(&tensorWrappers[tIdx], &tensorsInfoSrc[tIdx]); + } + } + QNN_FUNCTION_EXIT_LOG; + return returnStatus; +} + +bool sample_app::copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, + qnn_wrapper_api::GraphInfo_t *graphInfoDst) { + graphInfoDst->graphName = nullptr; + if (graphInfoSrc->graphName) { + graphInfoDst->graphName = + pal::StringOp::strndup(graphInfoSrc->graphName, strlen(graphInfoSrc->graphName)); + } + graphInfoDst->inputTensors = nullptr; + graphInfoDst->numInputTensors = 0; + if (graphInfoSrc->graphInputs) { + if (!copyTensorsInfo( + graphInfoSrc->graphInputs, graphInfoDst->inputTensors, graphInfoSrc->numGraphInputs)) { + return false; + } + graphInfoDst->numInputTensors = graphInfoSrc->numGraphInputs; + } + graphInfoDst->outputTensors = nullptr; + graphInfoDst->numOutputTensors = 0; + if (graphInfoSrc->graphOutputs) { + if (!copyTensorsInfo(graphInfoSrc->graphOutputs, + graphInfoDst->outputTensors, + 
graphInfoSrc->numGraphOutputs)) { + return false; + } + graphInfoDst->numOutputTensors = graphInfoSrc->numGraphOutputs; + } + return true; +} + +bool sample_app::copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, + const uint32_t numGraphs, + qnn_wrapper_api::GraphInfo_t **&graphsInfo) { + QNN_FUNCTION_ENTRY_LOG; + if (!graphsInput) { + QNN_ERROR("Received nullptr for graphsInput."); + return false; + } + auto returnStatus = true; + graphsInfo = + (qnn_wrapper_api::GraphInfo_t **)calloc(numGraphs, sizeof(qnn_wrapper_api::GraphInfo_t *)); + qnn_wrapper_api::GraphInfo_t *graphInfoArr = + (qnn_wrapper_api::GraphInfo_t *)calloc(numGraphs, sizeof(qnn_wrapper_api::GraphInfo_t)); + if (nullptr == graphsInfo || nullptr == graphInfoArr) { + QNN_ERROR("Failure to allocate memory for *graphInfo"); + returnStatus = false; + } + if (true == returnStatus) { + for (size_t gIdx = 0; gIdx < numGraphs; gIdx++) { + QNN_DEBUG("Extracting graphsInfo for graph Idx: %d", gIdx); + if (graphsInput[gIdx].version == QNN_SYSTEM_CONTEXT_GRAPH_INFO_VERSION_1) { + copyGraphsInfoV1(&graphsInput[gIdx].graphInfoV1, &graphInfoArr[gIdx]); + } + graphsInfo[gIdx] = graphInfoArr + gIdx; + } + } + if (true != returnStatus) { + QNN_ERROR("Received an ERROR during extractGraphsInfo. 
Freeing resources."); + if (graphsInfo) { + for (uint32_t gIdx = 0; gIdx < numGraphs; gIdx++) { + if (graphsInfo[gIdx]) { + if (nullptr != graphsInfo[gIdx]->graphName) { + free(graphsInfo[gIdx]->graphName); + graphsInfo[gIdx]->graphName = nullptr; + } + qnn_wrapper_api::freeQnnTensors(graphsInfo[gIdx]->inputTensors, + graphsInfo[gIdx]->numInputTensors); + qnn_wrapper_api::freeQnnTensors(graphsInfo[gIdx]->outputTensors, + graphsInfo[gIdx]->numOutputTensors); + } + } + free(*graphsInfo); + } + free(graphsInfo); + graphsInfo = nullptr; + } + QNN_FUNCTION_EXIT_LOG; + return true; +} + +bool sample_app::copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, + qnn_wrapper_api::GraphInfo_t **&graphsInfo, + uint32_t &graphsCount) { + if (nullptr == binaryInfo) { + QNN_ERROR("binaryInfo is nullptr."); + return false; + } + graphsCount = 0; + if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_1) { + if (binaryInfo->contextBinaryInfoV1.graphs) { + if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV1.graphs, + binaryInfo->contextBinaryInfoV1.numGraphs, + graphsInfo)) { + QNN_ERROR("Failed while copying graphs Info."); + return false; + } + graphsCount = binaryInfo->contextBinaryInfoV1.numGraphs; + return true; + } + } else if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_2) { + if (binaryInfo->contextBinaryInfoV2.graphs) { + if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV2.graphs, + binaryInfo->contextBinaryInfoV2.numGraphs, + graphsInfo)) { + QNN_ERROR("Failed while copying graphs Info."); + return false; + } + graphsCount = binaryInfo->contextBinaryInfoV2.numGraphs; + return true; + } + } + QNN_ERROR("Unrecognized system context binary info version."); + return false; +} + +QnnLog_Level_t sample_app::parseLogLevel(std::string logLevelString) { + QNN_FUNCTION_ENTRY_LOG; + std::transform(logLevelString.begin(), logLevelString.end(), logLevelString.begin(), ::tolower); + QnnLog_Level_t parsedLogLevel = QNN_LOG_LEVEL_MAX; + if 
(logLevelString == "error") { + parsedLogLevel = QNN_LOG_LEVEL_ERROR; + } else if (logLevelString == "warn") { + parsedLogLevel = QNN_LOG_LEVEL_WARN; + } else if (logLevelString == "info") { + parsedLogLevel = QNN_LOG_LEVEL_INFO; + } else if (logLevelString == "verbose") { + parsedLogLevel = QNN_LOG_LEVEL_VERBOSE; + } else if (logLevelString == "debug") { + parsedLogLevel = QNN_LOG_LEVEL_DEBUG; + } + QNN_FUNCTION_EXIT_LOG; + return parsedLogLevel; +} diff --git a/SampleApp/src/Utils/QnnSampleAppUtils.hpp b/SampleApp/src/Utils/QnnSampleAppUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..caa08f0668ed1a1f261042cb1bc73ef86e1f3836 --- /dev/null +++ b/SampleApp/src/Utils/QnnSampleAppUtils.hpp @@ -0,0 +1,79 @@ +//============================================================================== +// +// Copyright (c) 2019-2023 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "SampleApp.hpp" + +namespace qnn { +namespace tools { +namespace sample_app { + +enum class ProfilingLevel { OFF, BASIC, DETAILED, INVALID }; + +using ReadInputListRetType_t = std:: + tuple>, std::unordered_map, bool>; + +ReadInputListRetType_t readInputList(std::string inputFileListPath); + +using ReadInputListsRetType_t = std::tuple>>, + std::vector>, + bool>; + +ReadInputListsRetType_t readInputLists(std::vector inputFileListPath); + +std::unordered_map extractInputNameIndices(const std::string &inputLine, + const std::string &separator); + +std::string sanitizeTensorName(std::string name); + +ProfilingLevel parseProfilingLevel(std::string profilingLevelString); + +void parseInputFilePaths(std::vector &inputFilePaths, + std::vector &paths, + std::string separator); + +void split(std::vector &splitString, + const 
std::string &tokenizedString, + const char separator); + +bool copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, + qnn_wrapper_api::GraphInfo_t **&graphsInfo, + uint32_t &graphsCount); + +bool copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, + const uint32_t numGraphs, + qnn_wrapper_api::GraphInfo_t **&graphsInfo); + +bool copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, + qnn_wrapper_api::GraphInfo_t *graphInfoDst); + +bool copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, + Qnn_Tensor_t *&tensorWrappers, + uint32_t tensorsCount); + +bool deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t *src); + +QnnLog_Level_t parseLogLevel(std::string logLevelString); + +void inline exitWithMessage(std::string &&msg, int code) { + std::cerr << msg << std::endl; + std::exit(code); +} + +} // namespace sample_app +} // namespace tools +} // namespace qnn \ No newline at end of file diff --git a/SampleApp/src/WrapperUtils/QnnWrapperUtils.cpp b/SampleApp/src/WrapperUtils/QnnWrapperUtils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6b1d1ac53a18393346929b75fe7f3531bd2e1b32 --- /dev/null +++ b/SampleApp/src/WrapperUtils/QnnWrapperUtils.cpp @@ -0,0 +1,48 @@ +//============================================================================== +// +// Copyright (c) 2020, 2022-2024 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#include + +#include "QnnTypeMacros.hpp" +#include "QnnWrapperUtils.hpp" + +qnn_wrapper_api::ModelError_t qnn_wrapper_api::freeQnnTensor(Qnn_Tensor_t &tensor) { + // free all pointer allocations in struct + free((void *)QNN_TENSOR_GET_NAME(tensor)); + free(QNN_TENSOR_GET_DIMENSIONS(tensor)); + if (QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor)) { + free(QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor)); + } + return MODEL_NO_ERROR; +} + +qnn_wrapper_api::ModelError_t qnn_wrapper_api::freeQnnTensors(Qnn_Tensor_t *&tensors, + uint32_t numTensors) { + // free all pointer allocations in struct + for (size_t i = 0; i < numTensors; i++) { + freeQnnTensor(tensors[i]); + } + free(tensors); + return MODEL_NO_ERROR; +} + +qnn_wrapper_api::ModelError_t qnn_wrapper_api::freeGraphsInfo(GraphInfoPtr_t **graphsInfo, + uint32_t numGraphs) { + if (graphsInfo == nullptr || *graphsInfo == nullptr) { + return MODEL_TENSOR_ERROR; + } + for (uint32_t i = 0; i < numGraphs; i++) { + free((*graphsInfo)[i]->graphName); + freeQnnTensors((*graphsInfo)[i]->inputTensors, (*graphsInfo)[i]->numInputTensors); + freeQnnTensors((*graphsInfo)[i]->outputTensors, (*graphsInfo)[i]->numOutputTensors); + } + free(**graphsInfo); + free(*graphsInfo); + *graphsInfo = nullptr; + return MODEL_NO_ERROR; +} diff --git a/SampleApp/src/WrapperUtils/QnnWrapperUtils.hpp b/SampleApp/src/WrapperUtils/QnnWrapperUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a2fb100869d773f212dd3146dbf26151fa58213f --- /dev/null +++ b/SampleApp/src/WrapperUtils/QnnWrapperUtils.hpp @@ -0,0 +1,166 @@ +//============================================================================== +// +// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#pragma once + +#include "QnnContext.h" +#include "QnnGraph.h" +#include "QnnTensor.h" +#include "QnnTypes.h" + +namespace qnn_wrapper_api { + +// macro utils + +// Enables FILE[LINE]: FMT for VALIDATE macro +#ifdef QNN_ENABLE_DEBUG + +#define PRINTF(fmt, ...) \ + do { \ + printf("%s[%d]: ", __FILE__, __LINE__); \ + printf((fmt), ##__VA_ARGS__); \ + } while (0) + +#else + +#define PRINTF(fmt, ...) \ + do { \ + printf((fmt), ##__VA_ARGS__); \ + } while (0) + +#endif + +#ifdef QNN_ENABLE_DEBUG +#define PRINT_DEBUG(fmt, ...) \ + do { \ + printf("[ DEBUG ] "); \ + PRINTF((fmt), ##__VA_ARGS__); \ + } while (0) +#else +#define PRINT_DEBUG(fmt, ...) +#endif + +// Enables ERROR tag for errors +#define PRINT_ERROR(fmt, ...) \ + do { \ + printf("[ ERROR ] "); \ + PRINTF((fmt), ##__VA_ARGS__); \ + } while (0) + +// Enables WARNING tag for errors +#define PRINT_WARNING(fmt, ...) \ + do { \ + printf("[ WARNING ] "); \ + PRINTF((fmt), ##__VA_ARGS__); \ + } while (0) + +// Enables INFO tag for errors +#define PRINT_INFO(fmt, ...) 
\ + do { \ + printf("[ INFO ] "); \ + PRINTF((fmt), ##__VA_ARGS__); \ + } while (0) + +#define STRINGFY(str) str +#define STRINGFYVALUE(str) STRINGFY(str) + +// Ensures ModelError_t returning functions return MODEL_NO_ERROR +// retStatus should be set to MODEL_NO_ERROR before passing to macro +#define VALIDATE(value, retStatus) \ + do { \ + retStatus = value; \ + if (retStatus != qnn_wrapper_api::MODEL_NO_ERROR) { \ + PRINT_ERROR( \ + "%s expected MODEL_NO_ERROR, got %s\n", #value, getModelErrorName(retStatus).c_str()); \ + return retStatus; \ + } \ + } while (0) + +// macros for retrieving binary data +#define BINVARSTART(NAME) \ + ({ \ + extern const uint8_t _binary_obj_binary_##NAME##_raw_start[]; \ + (void *)_binary_obj_binary_##NAME##_raw_start; \ + }) +#define BINVAREND(NAME) \ + ({ \ + extern const uint8_t _binary_obj_binary_##NAME##_raw_end[]; \ + (void *)_binary_obj_binary_##NAME##_raw_end; \ + }) +#define BINLEN(NAME) \ + ({ \ + extern const uint8_t _binary_obj_binary_##NAME##_raw_start[]; \ + extern const uint8_t _binary_obj_binary_##NAME##_raw_end[]; \ + (uint32_t)((_binary_obj_binary_##NAME##_raw_end) - (_binary_obj_binary_##NAME##_raw_start)); \ + }) + +typedef enum ModelError { + MODEL_NO_ERROR = 0, + MODEL_TENSOR_ERROR = 1, + MODEL_PARAMS_ERROR = 2, + MODEL_NODES_ERROR = 3, + MODEL_GRAPH_ERROR = 4, + MODEL_CONTEXT_ERROR = 5, + MODEL_GENERATION_ERROR = 6, + MODEL_SETUP_ERROR = 7, + MODEL_INVALID_ARGUMENT_ERROR = 8, + MODEL_FILE_ERROR = 9, + MODEL_MEMORY_ALLOCATE_ERROR = 10, + // Value selected to ensure 32 bits. 
+ MODEL_UNKNOWN_ERROR = 0x7FFFFFFF +} ModelError_t; + +typedef struct GraphInfo { + Qnn_GraphHandle_t graph; + char *graphName; + Qnn_Tensor_t *inputTensors; + uint32_t numInputTensors; + Qnn_Tensor_t *outputTensors; + uint32_t numOutputTensors; +} GraphInfo_t; +typedef GraphInfo_t *GraphInfoPtr_t; + +typedef struct GraphConfigInfo { + char *graphName; + const QnnGraph_Config_t **graphConfigs; +} GraphConfigInfo_t; + +/** + * @brief Frees all memory allocated tensor attributes. + * + * @param[in] tensor Qnn_Tensor_t object to free + * + * @return Error code + */ +ModelError_t freeQnnTensor(Qnn_Tensor_t &tensor); + +/** + * @brief Loops through and frees all memory allocated tensor attributes for each tensor + * object. + * + * @param[in] tensors array of tensor objects to free + * + * @param[in] numTensors length of the above tensors array + * + * @return Error code + */ +ModelError_t freeQnnTensors(Qnn_Tensor_t *&tensors, uint32_t numTensors); + +/** + * @brief A helper function to free memory malloced for communicating the Graph for a model(s) + * + * @param[in] graphsInfo Pointer pointing to location of graph objects + * + * @param[in] numGraphs The number of graph objects the above pointer is pointing to + * + * @return Error code + * + */ +ModelError_t freeGraphsInfo(GraphInfoPtr_t **graphsInfo, uint32_t numGraphs); + +} // namespace qnn_wrapper_api diff --git a/SampleApp/src/main.cpp b/SampleApp/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b8faee941a8b28e316b59cdf2492a91735a35ab4 --- /dev/null +++ b/SampleApp/src/main.cpp @@ -0,0 +1,456 @@ +//============================================================================== +// +// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. +// All Rights Reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. 
+// +//============================================================================== + +#include +#include +#include + +#include "BuildId.hpp" +#include "DynamicLoadUtil.hpp" +#include "Logger.hpp" +#include "PAL/DynamicLoading.hpp" +#include "PAL/GetOpt.hpp" +#include "QnnSampleApp.hpp" +#include "QnnSampleAppUtils.hpp" + +static void* sg_backendHandle{nullptr}; +static void* sg_modelHandle{nullptr}; + +namespace qnn { +namespace tools { +namespace sample_app { + +void showHelp() { + std::cout + << "\nDESCRIPTION:\n" + << "------------\n" + << "Sample application demonstrating how to load and execute a neural network\n" + << "using QNN APIs.\n" + << "\n\n" + << "REQUIRED ARGUMENTS:\n" + << "-------------------\n" + << " --model Path to the model containing a QNN network.\n" + << "\n" + << " --backend Path to a QNN backend to execute the model.\n" + << "\n" + << " --input_list Path to a file listing the inputs for the network.\n" + << " If there are multiple graphs in model.so, this has\n" + << " to be comma separated list of input list files.\n" + << "\n" + << " --retrieve_context Path to cached binary from which to load a saved\n" + " context from and execute graphs. --retrieve_context " + "and\n" + " --model are mutually exclusive. Only one of the " + "options\n" + " can be specified at a time.\n" + << "\n\n" + + << "OPTIONAL ARGUMENTS:\n" + << "-------------------\n" + + << " --debug Specifies that output from all layers of the network\n" + << " will be saved.\n" + << "\n" + << " --output_dir The directory to save output to. Defaults to " + "./output.\n" + << "\n" + << " --output_data_type Data type of the output. Values can be:\n\n" + " 1. float_only: dump outputs in float only.\n" + " 2. native_only: dump outputs in data type " + "native\n" + " to the model. For ex., " + "uint8_t.\n" + " 3. float_and_native: dump outputs in both float and\n" + " native.\n\n" + " (This is N/A for a float model. 
In other cases,\n" + " if not specified, defaults to float_only.)\n" + << "\n" + << " --input_data_type Data type of the input. Values can be:\n\n" + " 1. float: reads inputs as floats and quantizes\n" + " if necessary based on quantization\n" + " parameters in the model.\n" + " 2. native: reads inputs assuming the data type to " + "be\n" + " native to the model. For ex., " + "uint8_t.\n\n" + " (This is N/A for a float model. In other cases,\n" + " if not specified, defaults to float.)\n" + << "\n" + << " --op_packages Provide a comma separated list of op packages \n" + " and interface providers to register. The syntax is:\n" + " " + "op_package_path:interface_provider[,op_package_path:interface_provider...]\n" + << "\n" + << " --profiling_level Enable profiling. Valid Values:\n" + " 1. basic: captures execution and init time.\n" + " 2. detailed: in addition to basic, captures\n" + " per Op timing for execution.\n" + << "\n" + << " --save_context Specifies that the backend context and metadata " + "related \n" + " to graphs be saved to a binary file.\n" + " Value of this parameter is the name of the name\n" + " required to save the context binary to.\n" + " Saved in the same path as --output_dir option.\n" + " Note: --retrieve_context and --save_context are " + "mutually\n" + " exclusive. Both options should not be specified at\n" + " the same time.\n" + << "\n" +#ifdef QNN_ENABLE_DEBUG + << " --log_level Specifies max logging level to be set. Valid " + "settings: \n" + " \"error\", \"warn\", \"info\", \"verbose\" and " + "\"debug\"." + "\n" +#else + << " --log_level Specifies max logging level to be set. Valid " + "settings: \n" + " \"error\", \"warn\", \"info\" and \"verbose\"." 
+ "\n" +#endif + << "\n" + << " --system_library Path to QNN System library (libQnnSystem.so) needed to " + "exercise reflection APIs\n" + " when loading a context from a binary cache.\n" + " libQnnSystem.so is provided under /lib in the " + "SDK.\n" + "\n" + << " --version Print the QNN SDK version.\n" + << "\n" + << " --help Show this help message.\n" + << std::endl; +} + +void showHelpAndExit(std::string&& error) { + std::cerr << "ERROR: " << error << "\n"; + std::cerr << "Please check help below:\n"; + showHelp(); + std::exit(EXIT_FAILURE); +} + +std::unique_ptr processCommandLine(int argc, + char** argv, + bool& loadFromCachedBinary) { + enum OPTIONS { + OPT_HELP = 0, + OPT_MODEL = 1, + OPT_BACKEND = 2, + OPT_INPUT_LIST = 3, + OPT_OUTPUT_DIR = 4, + OPT_OP_PACKAGES = 5, + OPT_DEBUG_OUTPUTS = 6, + OPT_OUTPUT_DATA_TYPE = 7, + OPT_INPUT_DATA_TYPE = 8, + OPT_LOG_LEVEL = 9, + OPT_PROFILING_LEVEL = 10, + OPT_RETRIEVE_CONTEXT = 11, + OPT_SAVE_CONTEXT = 12, + OPT_VERSION = 13, + OPT_SYSTEM_LIBRARY = 14 + }; + + // Create the command line options + static struct pal::Option s_longOptions[] = { + {"help", pal::no_argument, NULL, OPT_HELP}, + {"model", pal::required_argument, NULL, OPT_MODEL}, + {"backend", pal::required_argument, NULL, OPT_BACKEND}, + {"input_list", pal::required_argument, NULL, OPT_INPUT_LIST}, + {"output_dir", pal::required_argument, NULL, OPT_OUTPUT_DIR}, + {"op_packages", pal::required_argument, NULL, OPT_OP_PACKAGES}, + {"debug", pal::no_argument, NULL, OPT_DEBUG_OUTPUTS}, + {"output_data_type", pal::required_argument, NULL, OPT_OUTPUT_DATA_TYPE}, + {"input_data_type", pal::required_argument, NULL, OPT_INPUT_DATA_TYPE}, + {"profiling_level", pal::required_argument, NULL, OPT_PROFILING_LEVEL}, + {"log_level", pal::required_argument, NULL, OPT_LOG_LEVEL}, + {"retrieve_context", pal::required_argument, NULL, OPT_RETRIEVE_CONTEXT}, + {"save_context", pal::required_argument, NULL, OPT_SAVE_CONTEXT}, + {"system_library", pal::required_argument, NULL, 
OPT_SYSTEM_LIBRARY}, + {"version", pal::no_argument, NULL, OPT_VERSION}, + {NULL, 0, NULL, 0}}; + + // Command line parsing loop + int longIndex = 0; + int opt = 0; + std::string modelPath; + std::string backEndPath; + std::string inputListPaths; + bool debug = false; + std::string outputPath; + std::string opPackagePaths; + iotensor::OutputDataType parsedOutputDataType = iotensor::OutputDataType::FLOAT_ONLY; + iotensor::InputDataType parsedInputDataType = iotensor::InputDataType::FLOAT; + sample_app::ProfilingLevel parsedProfilingLevel = ProfilingLevel::OFF; + bool dumpOutputs = true; + std::string cachedBinaryPath; + std::string saveBinaryName; + QnnLog_Level_t logLevel{QNN_LOG_LEVEL_ERROR}; + std::string systemLibraryPath; + while ((opt = pal::getOptLongOnly(argc, argv, "", s_longOptions, &longIndex)) != -1) { + switch (opt) { + case OPT_HELP: + showHelp(); + std::exit(EXIT_SUCCESS); + break; + + case OPT_VERSION: + std::cout << "QNN SDK " << qnn::tools::getBuildId() << "\n"; + std::exit(EXIT_SUCCESS); + break; + + case OPT_MODEL: + modelPath = pal::g_optArg; + break; + + case OPT_BACKEND: + backEndPath = pal::g_optArg; + break; + + case OPT_INPUT_LIST: + inputListPaths = pal::g_optArg; + break; + + case OPT_DEBUG_OUTPUTS: + debug = true; + break; + + case OPT_OUTPUT_DIR: + outputPath = pal::g_optArg; + break; + + case OPT_OP_PACKAGES: + opPackagePaths = pal::g_optArg; + break; + + case OPT_OUTPUT_DATA_TYPE: + parsedOutputDataType = iotensor::parseOutputDataType(pal::g_optArg); + if (parsedOutputDataType == iotensor::OutputDataType::INVALID) { + showHelpAndExit("Invalid output data type string."); + } + break; + + case OPT_INPUT_DATA_TYPE: + parsedInputDataType = iotensor::parseInputDataType(pal::g_optArg); + if (parsedInputDataType == iotensor::InputDataType::INVALID) { + showHelpAndExit("Invalid input data type string."); + } + break; + + case OPT_PROFILING_LEVEL: + parsedProfilingLevel = sample_app::parseProfilingLevel(pal::g_optArg); + if 
(parsedProfilingLevel == sample_app::ProfilingLevel::INVALID) { + showHelpAndExit("Invalid profiling level."); + } + break; + + case OPT_LOG_LEVEL: + logLevel = sample_app::parseLogLevel(pal::g_optArg); + if (logLevel != QNN_LOG_LEVEL_MAX) { + if (!log::setLogLevel(logLevel)) { + showHelpAndExit("Unable to set log level."); + } + } + break; + + case OPT_RETRIEVE_CONTEXT: + loadFromCachedBinary = true; + cachedBinaryPath = pal::g_optArg; + if (cachedBinaryPath.empty()) { + showHelpAndExit("Cached context binary file not specified."); + } + break; + + case OPT_SAVE_CONTEXT: + saveBinaryName = pal::g_optArg; + if (saveBinaryName.empty()) { + showHelpAndExit("Save context needs a file name."); + } + break; + + case OPT_SYSTEM_LIBRARY: + systemLibraryPath = pal::g_optArg; + if (systemLibraryPath.empty()) { + showHelpAndExit("System library (libQnnSystem.so) path not specified."); + } + break; + + default: + std::cerr << "ERROR: Invalid argument passed: " << argv[pal::g_optInd - 1] + << "\nPlease check the Arguments section in the description below.\n"; + showHelp(); + std::exit(EXIT_FAILURE); + } + } + + if (!modelPath.empty()) { + if (!cachedBinaryPath.empty()) { + showHelpAndExit( + "Error: both --model and --cached_binary specified. Only one option is valid at a " + "time.\n"); + } + } else { + if (cachedBinaryPath.empty()) { + showHelpAndExit("Missing option: --model\n"); + } + } + + if (!cachedBinaryPath.empty() && !saveBinaryName.empty()) { + showHelpAndExit("Error: both --cached_binary and --save_binary specified"); + } + + if (backEndPath.empty()) { + showHelpAndExit("Missing option: --backend\n"); + } + + if (inputListPaths.empty()) { + showHelpAndExit("Missing option: --input_list\n"); + } + + if (loadFromCachedBinary && systemLibraryPath.empty()) { + showHelpAndExit( + "Missing option: --system_library. 
QNN System shared library (libQnnSystem.so) is needed " + "to load from a cached binary\n"); + } + + QNN_INFO("Model: %s", modelPath.c_str()); + QNN_INFO("Backend: %s", backEndPath.c_str()); + + QnnFunctionPointers qnnFunctionPointers; + // Load backend and model .so and validate all the required function symbols are resolved + auto statusCode = dynamicloadutil::getQnnFunctionPointers(backEndPath, + modelPath, + &qnnFunctionPointers, + &sg_backendHandle, + !loadFromCachedBinary, + &sg_modelHandle); + if (dynamicloadutil::StatusCode::SUCCESS != statusCode) { + if (dynamicloadutil::StatusCode::FAIL_LOAD_BACKEND == statusCode) { + exitWithMessage( + "Error initializing QNN Function Pointers: could not load backend: " + backEndPath, + EXIT_FAILURE); + } else if (dynamicloadutil::StatusCode::FAIL_LOAD_MODEL == statusCode) { + exitWithMessage( + "Error initializing QNN Function Pointers: could not load model: " + modelPath, + EXIT_FAILURE); + } else { + exitWithMessage("Error initializing QNN Function Pointers", EXIT_FAILURE); + } + } + + if (loadFromCachedBinary) { + statusCode = + dynamicloadutil::getQnnSystemFunctionPointers(systemLibraryPath, &qnnFunctionPointers); + if (dynamicloadutil::StatusCode::SUCCESS != statusCode) { + exitWithMessage("Error initializing QNN System Function Pointers", EXIT_FAILURE); + } + } + + std::unique_ptr app(new sample_app::QnnSampleApp(qnnFunctionPointers, + inputListPaths, + opPackagePaths, + sg_backendHandle, + outputPath, + debug, + parsedOutputDataType, + parsedInputDataType, + parsedProfilingLevel, + dumpOutputs, + cachedBinaryPath, + saveBinaryName)); + return app; +} + +} // namespace sample_app +} // namespace tools +} // namespace qnn + +int main(int argc, char** argv) { + using namespace qnn::tools; + + if (!qnn::log::initializeLogging()) { + std::cerr << "ERROR: Unable to initialize logging!\n"; + return EXIT_FAILURE; + } + + { + bool loadFromCachedBinary{false}; + std::unique_ptr app = + sample_app::processCommandLine(argc, 
argv, loadFromCachedBinary); + + if (nullptr == app) { + return EXIT_FAILURE; + } + + QNN_INFO("qnn-sample-app build version: %s", qnn::tools::getBuildId().c_str()); + QNN_INFO("Backend build version: %s", app->getBackendBuildId().c_str()); + + if (sample_app::StatusCode::SUCCESS != app->initialize()) { + return app->reportError("Initialization failure"); + } + + if (sample_app::StatusCode::SUCCESS != app->initializeBackend()) { + return app->reportError("Backend Initialization failure"); + } + + auto devicePropertySupportStatus = app->isDevicePropertySupported(); + if (sample_app::StatusCode::FAILURE != devicePropertySupportStatus) { + auto createDeviceStatus = app->createDevice(); + if (sample_app::StatusCode::SUCCESS != createDeviceStatus) { + return app->reportError("Device Creation failure"); + } + } + + if (sample_app::StatusCode::SUCCESS != app->initializeProfiling()) { + return app->reportError("Profiling Initialization failure"); + } + + if (sample_app::StatusCode::SUCCESS != app->registerOpPackages()) { + return app->reportError("Register Op Packages failure"); + } + + if (!loadFromCachedBinary) { + if (sample_app::StatusCode::SUCCESS != app->createContext()) { + return app->reportError("Context Creation failure"); + } + if (sample_app::StatusCode::SUCCESS != app->composeGraphs()) { + return app->reportError("Graph Prepare failure"); + } + if (sample_app::StatusCode::SUCCESS != app->finalizeGraphs()) { + return app->reportError("Graph Finalize failure"); + } + } else { + if (sample_app::StatusCode::SUCCESS != app->createFromBinary()) { + return app->reportError("Create From Binary failure"); + } + } + + if (sample_app::StatusCode::SUCCESS != app->executeGraphs()) { + return app->reportError("Graph Execution failure"); + } + + if (sample_app::StatusCode::SUCCESS != app->freeContext()) { + return app->reportError("Context Free failure"); + } + + if (sample_app::StatusCode::FAILURE != devicePropertySupportStatus) { + auto freeDeviceStatus = 
app->freeDevice(); + if (sample_app::StatusCode::SUCCESS != freeDeviceStatus) { + return app->reportError("Device Free failure"); + } + } + } + + if (sg_backendHandle) { + pal::dynamicloading::dlClose(sg_backendHandle); + } + if (sg_modelHandle) { + pal::dynamicloading::dlClose(sg_modelHandle); + } + + return EXIT_SUCCESS; +}