diff --git a/qa/L0_custom_ops/test.sh b/qa/L0_custom_ops/test.sh
index a0f1e9c782..10aee37aaf 100755
--- a/qa/L0_custom_ops/test.sh
+++ b/qa/L0_custom_ops/test.sh
@@ -111,7 +111,10 @@ LD_LIBRARY_PATH=/opt/tritonserver/backends/pytorch:$LD_LIBRARY_PATH
 
 # Pytorch
 SERVER_ARGS="--model-repository=/data/inferenceserver/${REPO_VERSION}/qa_custom_ops/libtorch_custom_ops"
-SERVER_LD_PRELOAD="/data/inferenceserver/${REPO_VERSION}/qa_custom_ops/libtorch_custom_ops/libtorch_modulo/custom_modulo.so"
+# FIXME: Pre-load the system python library to satisfy the symbol definitions,
+# as the custom op library is built with a different python version inside the
+# pytorch container. See DLIS-4152.
+SERVER_LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libpython3.8.so.1:/data/inferenceserver/${REPO_VERSION}/qa_custom_ops/libtorch_custom_ops/libtorch_modulo/custom_modulo.so"
 run_server
 if [ "$SERVER_PID" == "0" ]; then
     echo -e "\n***\n*** Failed to start $SERVER\n***"
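
For context on why the extra preload entry helps: custom_modulo.so references CPython symbols that the server process does not otherwise provide, so the dynamic loader must see a compatible libpython before the custom op library is loaded. Below is a minimal sketch of how this could be checked on the test machine, assuming GNU nm is available; the symbol filters are illustrative and this snippet is not part of test.sh:

    # Hypothetical verification, not part of the test itself.
    CUSTOM_OP_LIB=/data/inferenceserver/${REPO_VERSION}/qa_custom_ops/libtorch_custom_ops/libtorch_modulo/custom_modulo.so

    # Undefined CPython symbols that the pre-loaded library must satisfy.
    nm -D --undefined-only "$CUSTOM_OP_LIB" | grep ' U Py' || true

    # Confirm the system libpython actually exports matching definitions.
    nm -D --defined-only /usr/lib/x86_64-linux-gnu/libpython3.8.so.1 | grep ' T Py' | head

If the first command lists Py* symbols and the second shows libpython3.8 defining them, the added LD_PRELOAD entry is what lets run_server start the server without unresolved-symbol failures.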