Commit
Add testing for new error handling API
Tabrizian committed Jun 1, 2023
1 parent 3d8d517 commit fa3d797
Showing 2 changed files with 17 additions and 10 deletions.
17 changes: 10 additions & 7 deletions qa/L0_backend_python/lifecycle/lifecycle_test.py
@@ -58,12 +58,13 @@ def setUp(self):
         self._shm_leak_detector = shm_util.ShmLeakDetector()
 
     def test_batch_error(self):
-        # The execute_error model returns an error for the first request and
-        # successfully processes the second request. This is making sure that
-        # an error in a single request does not completely fail the batch.
+        # The execute_error model returns an error for the first and third
+        # request and successfully processes the second request. This is making
+        # sure that an error in a single request does not completely fail the
+        # batch.
         model_name = "execute_error"
         shape = [2, 2]
-        number_of_requests = 2
+        number_of_requests = 3
         user_data = UserData()
         triton_client = grpcclient.InferenceServerClient("localhost:8001")
         triton_client.start_stream(callback=partial(callback, user_data))
@@ -83,7 +84,7 @@
 
         for i in range(number_of_requests):
             result = user_data._completed_requests.get()
-            if i == 0:
+            if i == 0 or i == 2:
                 self.assertIs(type(result), InferenceServerException)
                 continue
 
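The loop that actually sends the requests is collapsed in this view. For context, here is a minimal sketch of how such a test typically drives the stream with the tritonclient gRPC API; the input name "IN" is taken from the execute_error model below, and the collapsed code may differ in detail:

    import numpy as np
    import tritonclient.grpc as grpcclient
    from tritonclient.utils import np_to_triton_dtype

    # Send number_of_requests inferences over the already-open stream; each
    # response (or error) is delivered to the callback registered with
    # start_stream and queued in user_data._completed_requests.
    for _ in range(number_of_requests):
        input_data = np.random.randn(*shape).astype(np.float32)
        inputs = [
            grpcclient.InferInput("IN", list(input_data.shape),
                                  np_to_triton_dtype(input_data.dtype))
        ]
        inputs[0].set_data_from_numpy(input_data)
        triton_client.async_stream_infer(model_name=model_name, inputs=inputs)

With number_of_requests raised from 2 to 3, the check below then expects an InferenceServerException for the first and third results and a normal response for the second.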
@@ -139,7 +140,8 @@ def test_incorrect_execute_return(self):
         self.assertTrue(
             "Failed to process the request(s) for model instance "
             "'execute_return_error_0', message: Expected a list in the "
-            "execute return" in str(e.exception), "Exception message is not correct.")
+            "execute return" in str(e.exception),
+            "Exception message is not correct.")
 
         # The second inference request will return a list of None objects
         # instead of Python InferenceResponse objects.
@@ -150,7 +152,8 @@
             "Failed to process the request(s) for model instance "
             "'execute_return_error_0', message: Expected an "
             "'InferenceResponse' object in the execute function return"
-            " list" in str(e.exception), "Exception message is not correct.")
+            " list" in str(e.exception),
+            "Exception message is not correct.")
 
 
 if __name__ == '__main__':
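Both reformatted assertions check the text of an InferenceServerException captured by unittest's assertRaises context manager, which is where e.exception comes from. A hedged sketch of the surrounding pattern for the first case; the collapsed lines are assumed, and the synchronous infer call shown here is illustrative:

    # assertRaises captures the raised InferenceServerException so the
    # test can inspect its message afterwards.
    with self.assertRaises(InferenceServerException) as e:
        triton_client.infer(model_name, inputs)
    self.assertTrue(
        "Failed to process the request(s) for model instance "
        "'execute_return_error_0', message: Expected a list in the "
        "execute return" in str(e.exception),
        "Exception message is not correct.")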
10 changes: 7 additions & 3 deletions qa/python_models/execute_error/model.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+# Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -34,7 +34,7 @@ def execute(self, requests):
         """
         responses = []
 
-        # Only generate the error for the first request
+        # Generate the error for the first and third request
        i = 0
         for request in requests:
             input_tensor = pb_utils.get_input_tensor_by_name(request, "IN")
@@ -44,8 +44,12 @@
                     'An error occurred during execution')
                 responses.append(pb_utils.InferenceResponse([out_tensor],
                                                             error))
-            else:
+            elif i == 1:
                 responses.append(pb_utils.InferenceResponse([out_tensor]))
+            elif i == 2:
+                error = pb_utils.TritonError(
+                    'An error occurred during execution')
+                responses.append(pb_utils.InferenceResponse(error=error))
             i += 1
 
         return responses
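Piecing the two hunks together, the execute function after this commit plausibly reads as below. This is a sketch: the collapsed lines, notably the construction of out_tensor, are assumed. The i == 2 branch is what exercises the new error handling API, an InferenceResponse built from an error alone with no output tensors:

    import triton_python_backend_utils as pb_utils


    class TritonPythonModel:

        def execute(self, requests):
            responses = []

            # Generate the error for the first and third request
            i = 0
            for request in requests:
                input_tensor = pb_utils.get_input_tensor_by_name(request, "IN")
                # Assumed from collapsed context: echo the input back out.
                out_tensor = pb_utils.Tensor("OUT", input_tensor.as_numpy())
                if i == 0:
                    # Old API path: a response carrying both an output tensor
                    # and an error.
                    error = pb_utils.TritonError(
                        'An error occurred during execution')
                    responses.append(
                        pb_utils.InferenceResponse([out_tensor], error))
                elif i == 1:
                    responses.append(pb_utils.InferenceResponse([out_tensor]))
                elif i == 2:
                    # New in this commit: an error-only response with no
                    # output tensors attached.
                    error = pb_utils.TritonError(
                        'An error occurred during execution')
                    responses.append(pb_utils.InferenceResponse(error=error))
                i += 1

            return responses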
