test_models.sh split helper functions into utils
kirklandsign committed Sep 17, 2024
1 parent c605bae commit 6b33c82
Showing 2 changed files with 257 additions and 173 deletions.
185 changes: 12 additions & 173 deletions .ci/scripts/test_model.sh
100755 → 100644
@@ -8,7 +8,7 @@
set -exu

# shellcheck source=/dev/null
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
source "$(dirname "${BASH_SOURCE[0]}")/test_model_utils.sh"

MODEL_NAME=$1
if [[ -z "${MODEL_NAME:-}" ]]; then
@@ -30,181 +30,20 @@ fi

UPLOAD_DIR=${4:-}

if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
PYTHON_EXECUTABLE=python3
fi
which "${PYTHON_EXECUTABLE}"

# Just set this variable here, it's cheap even if we use buck2
CMAKE_OUTPUT_DIR=cmake-out
EXPORTED_MODEL=${MODEL_NAME}

prepare_artifacts_upload() {
if [ -n "$UPLOAD_DIR" ]; then
echo "Preparing for uploading generated artifacs"
zip -j model.zip "${EXPORTED_MODEL}"
mkdir -p "${UPLOAD_DIR}"
mv model.zip "${UPLOAD_DIR}"
fi
}

build_cmake_executor_runner() {
echo "Building executor_runner"
rm -rf ${CMAKE_OUTPUT_DIR}
cmake -DCMAKE_BUILD_TYPE=Debug \
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
-B${CMAKE_OUTPUT_DIR} .

cmake --build ${CMAKE_OUTPUT_DIR} -j4 --config Debug
}

run_portable_executor_runner() {
# Run test model
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
buck2 run //examples/portable/executor_runner:executor_runner -- --model_path "./${MODEL_NAME}.pte"
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
build_cmake_executor_runner
./${CMAKE_OUTPUT_DIR}/executor_runner --model_path "./${MODEL_NAME}.pte"
else
echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
exit 1
fi
}

test_model() {
if [[ "${MODEL_NAME}" == "llama2" ]]; then
# Install requirements for export_llama
bash examples/models/llama2/install_requirements.sh
# Test export_llama script: python3 -m examples.models.llama2.export_llama
"${PYTHON_EXECUTABLE}" -m examples.models.llama2.export_llama -c examples/models/llama2/params/demo_rand_params.pth -p examples/models/llama2/params/demo_config.json
run_portable_executor_runner
rm "./${MODEL_NAME}.pte"
fi
STRICT="--strict"
if [[ "${MODEL_NAME}" == "llava" ]]; then
# Install requirements for llava
bash examples/models/llava/install_requirements.sh
STRICT="--no-strict"
fi
# python3 -m examples.portable.scripts.export --model_name="llama2" should works too
"${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}" "${STRICT}"
run_portable_executor_runner
}

build_cmake_xnn_executor_runner() {
echo "Building xnn_executor_runner"
SITE_PACKAGES="$(${PYTHON_EXECUTABLE} -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
CMAKE_PREFIX_PATH="${SITE_PACKAGES}/torch"

(rm -rf ${CMAKE_OUTPUT_DIR} \
&& mkdir ${CMAKE_OUTPUT_DIR} \
&& cd ${CMAKE_OUTPUT_DIR} \
&& retry cmake -DCMAKE_BUILD_TYPE=Release \
-DEXECUTORCH_BUILD_XNNPACK=ON \
-DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH" \
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" ..)

cmake --build ${CMAKE_OUTPUT_DIR} -j4
}

test_model_with_xnnpack() {
WITH_QUANTIZATION=$1
WITH_DELEGATION=$2

# Quantization-only
if [[ ${WITH_QUANTIZATION} == true ]] && [[ ${WITH_DELEGATION} == false ]]; then
bash examples/xnnpack/quantization/test_quantize.sh "${BUILD_TOOL}" "${MODEL_NAME}"
return 0
fi

# Delegation
if [[ ${WITH_QUANTIZATION} == true ]]; then
SUFFIX="q8"
"${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate --quantize
else
SUFFIX="fp32"
"${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate
fi

OUTPUT_MODEL_PATH="${MODEL_NAME}_xnnpack_${SUFFIX}.pte"
EXPORTED_MODEL=${OUTPUT_MODEL_PATH}

# Run test model
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
buck2 run //examples/xnnpack:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
if [[ ! -f ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner ]]; then
build_cmake_xnn_executor_runner
fi
./${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner --model_path "${OUTPUT_MODEL_PATH}"
else
echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
exit 1
fi
}

test_model_with_qnn() {
source "$(dirname "${BASH_SOURCE[0]}")/build-qnn-sdk.sh"
echo "ANDROID_NDK_ROOT: $ANDROID_NDK_ROOT"
echo "QNN_SDK_ROOT: $QNN_SDK_ROOT"
echo "EXECUTORCH_ROOT: $EXECUTORCH_ROOT"

export LD_LIBRARY_PATH=$QNN_SDK_ROOT/lib/x86_64-linux-clang/
export PYTHONPATH=$EXECUTORCH_ROOT/..

if [[ "${MODEL_NAME}" == "dl3" ]]; then
EXPORT_SCRIPT=deeplab_v3
EXPORTED_MODEL_NAME=dlv3_qnn.pte
elif [[ "${MODEL_NAME}" == "mv3" ]]; then
EXPORT_SCRIPT=mobilenet_v3
EXPORTED_MODEL_NAME=mv3_qnn.pte
elif [[ "${MODEL_NAME}" == "mv2" ]]; then
EXPORT_SCRIPT=mobilenet_v2
EXPORTED_MODEL_NAME=mv2_qnn.pte
elif [[ "${MODEL_NAME}" == "ic4" ]]; then
EXPORT_SCRIPT=inception_v4
EXPORTED_MODEL_NAME=ic4_qnn.pte
elif [[ "${MODEL_NAME}" == "ic3" ]]; then
EXPORT_SCRIPT=inception_v3
EXPORTED_MODEL_NAME=ic3_qnn.pte
elif [[ "${MODEL_NAME}" == "vit" ]]; then
EXPORT_SCRIPT=torchvision_vit
EXPORTED_MODEL_NAME=vit_qnn.pte
fi

# Use SM8450 for S22, SM8550 for S23, and SM8560 for S24
# TODO(guangyang): Make QNN chipset matches the target device
QNN_CHIPSET=SM8450

"${PYTHON_EXECUTABLE}" -m examples.qualcomm.scripts.${EXPORT_SCRIPT} -b ${CMAKE_OUTPUT_DIR} -m ${QNN_CHIPSET} --compile_only
EXPORTED_MODEL=./${EXPORT_SCRIPT}/${EXPORTED_MODEL_NAME}
}

test_model_with_coreml() {
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
echo "coreml doesn't support buck2."
exit 1
fi

"${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}"
EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
}

if [[ "${BACKEND}" == "portable" ]]; then
echo "Testing ${MODEL_NAME} with portable kernels..."
test_model
test_model $MODEL_NAME
elif [[ "${BACKEND}" == "qnn" ]]; then
echo "Testing ${MODEL_NAME} with qnn..."
test_model_with_qnn
test_model_with_qnn $MODEL_NAME
if [[ $? -eq 0 ]]; then
prepare_artifacts_upload
prepare_artifacts_upload $UPLOAD_DIR
fi
elif [[ "${BACKEND}" == "coreml" ]]; then
echo "Testing ${MODEL_NAME} with coreml..."
test_model_with_coreml
test_model_with_coreml $MODEL_NAME $BUILD_TOOL
if [[ $? -eq 0 ]]; then
prepare_artifacts_upload
prepare_artifacts_upload $UPLOAD_DIR
fi
elif [[ "${BACKEND}" == "xnnpack" ]]; then
echo "Testing ${MODEL_NAME} with xnnpack..."
@@ -214,32 +53,32 @@ elif [[ "${BACKEND}" == "xnnpack" ]]; then
    # TODO(T197452682)
    WITH_QUANTIZATION=false
  fi
-  test_model_with_xnnpack "${WITH_QUANTIZATION}" "${WITH_DELEGATION}"
+  test_model_with_xnnpack "${MODEL_NAME}" "${WITH_QUANTIZATION}" "${WITH_DELEGATION}" "${BUILD_TOOL}"
  if [[ $? -eq 0 ]]; then
-    prepare_artifacts_upload
+    prepare_artifacts_upload $UPLOAD_DIR
  fi
else
  set +e
  if [[ "${BACKEND}" == *"quantization"* ]]; then
    echo "::group::Testing ${MODEL_NAME} with XNNPACK quantization only..."
-    test_model_with_xnnpack true false || Q_ERROR="error"
+    test_model_with_xnnpack "${MODEL_NAME}" true false "${BUILD_TOOL}" || Q_ERROR="error"
    echo "::endgroup::"
  fi
  if [[ "${BACKEND}" == *"delegation"* ]]; then
    echo "::group::Testing ${MODEL_NAME} with XNNPACK delegation only..."
-    test_model_with_xnnpack false true || D_ERROR="error"
+    test_model_with_xnnpack "${MODEL_NAME}" false true "${BUILD_TOOL}" || D_ERROR="error"
    echo "::endgroup::"
  fi
  if [[ "${BACKEND}" == *"quantization"* ]] && [[ "${BACKEND}" == *"delegation"* ]]; then
    echo "::group::Testing ${MODEL_NAME} with XNNPACK quantization and delegation..."
-    test_model_with_xnnpack true true || Q_D_ERROR="error"
+    test_model_with_xnnpack "${MODEL_NAME}" true true "${BUILD_TOOL}" || Q_D_ERROR="error"
    echo "::endgroup::"
  fi
  set -e
  if [[ -n "${Q_ERROR:-}" ]] || [[ -n "${D_ERROR:-}" ]] || [[ -n "${Q_D_ERROR:-}" ]]; then
    echo "Portable q8 ${Q_ERROR:-ok}," "Delegation fp32 ${D_ERROR:-ok}," "Delegation q8 ${Q_D_ERROR:-ok}"
    exit 1
  else
-    prepare_artifacts_upload
+    prepare_artifacts_upload $UPLOAD_DIR
  fi
fi
245 changes: 245 additions & 0 deletions .ci/scripts/test_model_utils.sh
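The diff for the new .ci/scripts/test_model_utils.sh did not render, but its shape can be inferred from the updated call sites above: each helper now receives its inputs as positional arguments instead of reading the globals (MODEL_NAME, BUILD_TOOL, UPLOAD_DIR) that test_model.sh used to set for it. Below is a minimal sketch of two of the moved helpers, assuming the bodies moved over essentially verbatim and only the parameter plumbing changed; the local variable names are illustrative, not necessarily the actual ones.

#!/bin/bash
# Sketch of .ci/scripts/test_model_utils.sh -- illustrative, not the real file.
# Assumptions: EXPORTED_MODEL remains a global shared between helpers, and
# `retry` (used by build_cmake_xnn_executor_runner, formerly provided by
# utils.sh) is still sourced or redefined here.

prepare_artifacts_upload() {
  local upload_dir=$1  # was the UPLOAD_DIR global
  if [ -n "$upload_dir" ]; then
    echo "Preparing to upload generated artifacts"
    zip -j model.zip "${EXPORTED_MODEL}"
    mkdir -p "${upload_dir}"
    mv model.zip "${upload_dir}"
  fi
}

test_model_with_coreml() {
  local model_name=$1 build_tool=$2  # matches the new two-argument call site
  if [[ "${build_tool}" == "buck2" ]]; then
    echo "coreml doesn't support buck2."
    exit 1
  fi
  "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${model_name}"
  EXPORTED_MODEL=$(find "." -type f -name "${model_name}*.pte" -print -quit)
}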

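For reference, the visible argument handling reads MODEL_NAME from $1 and UPLOAD_DIR from $4; the collapsed region of the script presumably assigns BUILD_TOOL from $2 and BACKEND from $3. Hypothetical invocations under that assumption, using backend names the dispatch above accepts:

# Portable kernels, cmake build, no artifact upload:
bash .ci/scripts/test_model.sh mv2 cmake portable

# Quantization-only and delegation-only XNNPACK runs (the else branch
# matches the substrings "quantization" and "delegation"), uploading the
# generated .pte on success:
bash .ci/scripts/test_model.sh mv2 cmake xnnpack-quantization-delegation "${PWD}/artifacts"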