Move multi-GPU dlpack test to a separate L0 test (triton-inference-server#6001)

* Move multi-GPU dlpack test to a separate L0 test

* Fix copyright

* Fix up
Tabrizian authored Jul 7, 2023
1 parent 78d9d82 commit 4d864a1
Showing 2 changed files with 9 additions and 7 deletions.
4 changes: 2 additions & 2 deletions qa/L0_backend_python/test.sh
@@ -376,11 +376,11 @@ and shared memory pages after starting triton equals to $current_num_pages \n***
 fi

 # Disable env test for Jetson since cloud storage repos are not supported
-# Disable ensemble, unittest, io and bls tests for Jetson since GPU Tensors are not supported
+# Disable ensemble, io and bls tests for Jetson since GPU Tensors are not supported
 # Disable variants test for Jetson since already built without GPU Tensor support
 # Disable decoupled test because it uses GPU tensors
 if [ "$TEST_JETSON" == "0" ]; then
-  SUBTESTS="ensemble unittest io bls decoupled variants"
+  SUBTESTS="ensemble io bls decoupled variants"
   for TEST in ${SUBTESTS}; do
     # Run each subtest in a separate virtual environment to avoid conflicts
     # between dependencies.
12 changes: 7 additions & 5 deletions (second changed file; filename not shown)
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -27,7 +27,7 @@

 SERVER=/opt/tritonserver/bin/tritonserver
 SERVER_ARGS="--model-repository=`pwd`/models --log-verbose=1"
-CLIENT_PY=../python_unittest.py
+CLIENT_PY=./python_unittest.py
 CLIENT_LOG="./client.log"
 EXPECTED_NUM_TESTS="1"
 TEST_RESULT_FILE='test_results.txt'
@@ -37,7 +37,7 @@ export CUDA_VISIBLE_DEVICES=0,1,2,3
 RET=0
 rm -fr *.log ./models

-source ../../common/util.sh
+source ../common/util.sh

 # Uninstall the non CUDA version of PyTorch
 pip3 uninstall -y torch
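The hunk above uninstalls the non-CUDA PyTorch build, and the next hunk's context line shows cupy-cuda12x being installed, because dlpack_test passes GPU tensors across framework boundaries via DLPack. A minimal sketch of the kind of round-trip such a test would exercise, assuming standard torch/cupy DLPack interop (the real checks live in qa/python_models/dlpack_test/model.py):

# Sketch only: assumed shape of the interop that dlpack_test verifies.
import cupy as cp
import torch

# CUDA_VISIBLE_DEVICES=0,1,2,3 above exposes four GPUs to the test.
for dev in range(torch.cuda.device_count()):
    src = torch.arange(8, dtype=torch.float32, device=f"cuda:{dev}")
    arr = cp.from_dlpack(src)      # zero-copy CuPy view of the same GPU buffer
    back = torch.from_dlpack(arr)  # hand the buffer back to PyTorch, still on GPU `dev`
    assert back.device.index == dev
    assert torch.equal(src, back)  # same memory, so values necessarily match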
@@ -50,8 +50,10 @@ pip3 install cupy-cuda12x
 rm -fr *.log ./models

 mkdir -p models/dlpack_test/1/
-cp ../../python_models/dlpack_test/model.py models/dlpack_test/1/
-cp ../../python_models/dlpack_test/config.pbtxt models/dlpack_test
+cp ../python_models/dlpack_test/model.py models/dlpack_test/1/
+cp ../python_models/dlpack_test/config.pbtxt models/dlpack_test
+cp ../L0_backend_python/python_unittest.py .
+sed -i 's#sys.path.append("../../common")#sys.path.append("../common")#g' python_unittest.py

 run_server
 if [ "$SERVER_PID" == "0" ]; then
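The two added lines at the end of the hunk make the relocated test self-contained: python_unittest.py is copied in from L0_backend_python (matching the CLIENT_PY=./python_unittest.py change above), and its sys.path bootstrap is rewritten because the test directory now sits one level shallower relative to qa/common, the same ../../ to ../ adjustment applied to util.sh earlier. A hedged sketch of the line the sed edits, with the helper module name assumed from the appended path:

# Top of the copied python_unittest.py after the sed rewrite (sketch; module name assumed).
import sys

# Before: sys.path.append("../../common")  -- two levels up, from the old location
sys.path.append("../common")  # after: one level up, from the new standalone test directory

import test_util  # shared QA helper assumed to live in qa/common/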
