Add test case for decoupled model raising exception (#6686) (#6707)
* Add test case for decoupled model raising exception

* Remove unused import

* Address comment
krishung5 committed Dec 15, 2023
1 parent 9d3bdc9 commit b807dee
Showing 4 changed files with 121 additions and 1 deletion.
30 changes: 30 additions & 0 deletions qa/L0_backend_python/decoupled/decoupled_test.py
@@ -289,6 +289,36 @@ def test_decoupled_execute_cancel(self):
        self.assertIn("[execute_cancel] Request not cancelled at 1.0 s", log_text)
        self.assertIn("[execute_cancel] Request cancelled at ", log_text)

    def test_decoupled_raise_exception(self):
        # The decoupled_raise_exception model raises an exception for the request.
        # This test case is making sure that repeated exceptions are properly handled.

        model_name = "decoupled_raise_exception"
        shape = [2, 2]
        number_of_requests = 10
        user_data = UserData()
        with grpcclient.InferenceServerClient("localhost:8001") as triton_client:
            triton_client.start_stream(callback=partial(callback, user_data))

            input_datas = []
            for i in range(number_of_requests):
                input_data = np.random.randn(*shape).astype(np.float32)
                input_datas.append(input_data)
                inputs = [
                    grpcclient.InferInput(
                        "IN", input_data.shape, np_to_triton_dtype(input_data.dtype)
                    )
                ]
                inputs[0].set_data_from_numpy(input_data)
                triton_client.async_stream_infer(model_name=model_name, inputs=inputs)

            for i in range(number_of_requests):
                result = user_data._completed_requests.get()
                self.assertIs(type(result), InferenceServerException)
                self.assertIn("Intentional Error", result.message())

            self.assertTrue(triton_client.is_model_ready(model_name))


if __name__ == "__main__":
    unittest.main()
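The test above relies on UserData and callback helpers that live in the shared QA test utilities rather than in this diff. As a rough sketch of the pattern they typically follow (the queue attribute name comes from the test; the rest is assumed), they simply funnel streaming results and errors into a queue:

import queue


class UserData:
    def __init__(self):
        # Streaming callbacks deposit results (or errors) here for the test to inspect.
        self._completed_requests = queue.Queue()


def callback(user_data, result, error):
    # On failure the gRPC stream hands back an InferenceServerException, which is
    # exactly what test_decoupled_raise_exception pops off the queue and asserts on.
    if error is not None:
        user_data._completed_requests.put(error)
    else:
        user_data._completed_requests.put(result)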
@@ -0,0 +1,35 @@
# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


class TritonPythonModel:
    def initialize(self, args):
        pass

    def execute(self, requests):
        for request in requests:
            raise Exception("Intentional Error")
        return None
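For contrast, and not part of this commit: a minimal sketch of how a decoupled Python model would normally produce output, sending responses through the response-sender API instead of returning them from execute. The tensor names match the config below; everything else is illustrative.

import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    def execute(self, requests):
        # Decoupled models send responses through a sender rather than returning them.
        for request in requests:
            sender = request.get_response_sender()
            in_tensor = pb_utils.get_input_tensor_by_name(request, "IN")
            out_tensor = pb_utils.Tensor("OUT", in_tensor.as_numpy())
            sender.send(
                pb_utils.InferenceResponse(output_tensors=[out_tensor]),
                flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL,
            )
        return None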
@@ -0,0 +1,55 @@
# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

name: "decoupled_raise_exception"
backend: "python"
max_batch_size: 64

model_transaction_policy {
decoupled: True
}
input [
{
name: "IN"
data_type: TYPE_FP32
dims: [ -1 ]
}
]

output [
{
name: "OUT"
data_type: TYPE_FP32
dims: [ -1 ]
}
]

instance_group [
{
count: 1
kind : KIND_CPU
}
]
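Once the model is loaded, a quick way to confirm the transaction policy took effect is to read the config back through the client. A hedged sketch, assuming the default gRPC address and the JSON field layout of the config response:

import tritonclient.grpc as grpcclient

client = grpcclient.InferenceServerClient("localhost:8001")
config = client.get_model_config("decoupled_raise_exception", as_json=True)
# A decoupled model should report decoupled == True in its transaction policy.
print(config["config"]["model_transaction_policy"]["decoupled"])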
2 changes: 1 addition & 1 deletion qa/L0_backend_python/decoupled/test.sh
@@ -27,7 +27,7 @@

CLIENT_PY=./decoupled_test.py
CLIENT_LOG="./decoupled_client.log"
EXPECTED_NUM_TESTS="6"
EXPECTED_NUM_TESTS="7"
TEST_RESULT_FILE='test_results.txt'
TRITON_DIR=${TRITON_DIR:="/opt/tritonserver"}
SERVER=${TRITON_DIR}/bin/tritonserver
