Skip to content

Commit

Permalink
Fix np.object_ deprecations (triton-inference-server#5184)
Browse files Browse the repository at this point in the history
* Fix np.object_ deprecations

* Review comment
  • Loading branch information
Tabrizian committed Dec 20, 2022
1 parent 580c062 commit e4e25d1
Show file tree
Hide file tree
Showing 4 changed files with 25 additions and 16 deletions.
9 changes: 7 additions & 2 deletions qa/L0_backend_python/python_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,9 @@ def test_unicode(self):
model_name = "string"
shape = [1]

for i in range(3):
# The first run will use np.bytes_ and the second run will use
# np.object_
for i in range(2):
with self._shm_leak_detector.Probe() as shm_probe:
with httpclient.InferenceServerClient(
"localhost:8000") as client:
Expand Down Expand Up @@ -328,7 +330,10 @@ def test_string(self):
model_name = "string_fixed"
shape = [1]

for i in range(6):
# Test different string outputs. This test will send 4 requests to the
# backend. The model will return 4 responses (np.object_ and np.bytes_) *
# (empty output and fixed output)
for i in range(4):
with self._shm_leak_detector.Probe() as shm_probe:
with httpclient.InferenceServerClient(
"localhost:8000") as client:
Expand Down
4 changes: 2 additions & 2 deletions qa/L0_string_io/string_client_test.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
Expand Down Expand Up @@ -148,7 +148,7 @@ def _test_str_dtype(self, client, model_name, dtype=np.object_):
self._test_infer_non_unicode(model_name, client, in0_bytes)

def _test_bytes(self, model_name):
dtypes = [np.object_, np.object, np.bytes_]
dtypes = [np.object_, np.bytes_]

# This clients will fail for binary_data=False when the binary input
# is not UTF-8 encodable. They should work for other cases however.
Expand Down
4 changes: 2 additions & 2 deletions qa/python_models/string/model.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
Expand Down Expand Up @@ -35,7 +35,7 @@ class TritonPythonModel:

def initialize(self, args):
    """Set up per-model state before any inference request is served.

    Args:
        args: backend-provided initialization arguments (unused here).

    Side effects:
        self._index: counter advanced by execute() to cycle outputs.
        self._dtypes: the two string dtypes exercised by the test.
            np.object (the plain-`object` alias) was dropped because it is
            deprecated/removed in modern NumPy; np.object_ and np.bytes_
            remain the supported string dtypes.
    """
    self._index = 0
    self._dtypes = [np.bytes_, np.object_]

def execute(self, requests):
responses = []
Expand Down
24 changes: 14 additions & 10 deletions qa/python_models/string_fixed/model.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
Expand Down Expand Up @@ -35,21 +35,25 @@ class TritonPythonModel:

def initialize(self, args):
    """Set up per-model state before any inference request is served.

    Args:
        args: backend-provided initialization arguments (unused here).

    Side effects:
        self._index: response counter advanced by execute().
        self._dtypes: the two string dtypes exercised by the test; the
            deprecated np.object alias was removed, leaving np.bytes_ and
            np.object_.
    """
    self._index = 0
    self._dtypes = [np.bytes_, np.object_]

def execute(self, requests):
    """Return one canned string response per request.

    Successive calls cycle self._index through 0..3, alternating a fixed
    string ('123456') with an empty string tensor across the dtypes set up
    in initialize() (self._dtypes == [np.bytes_, np.object_]).

    Args:
        requests: iterable of inference requests; only its length is used.

    Returns:
        list of pb_utils.InferenceResponse, one per request, each carrying
        a single "OUTPUT0" tensor.
    """
    responses = []
    for _ in requests:
        if self._index == 0:
            # Fixed string with the np.bytes_ dtype.
            out_tensor_0 = pb_utils.Tensor(
                "OUTPUT0", np.array(['123456'], dtype=self._dtypes[0]))
        elif self._index == 1:
            # Empty string tensor with the np.object_ dtype.
            out_tensor_0 = pb_utils.Tensor(
                "OUTPUT0", np.array([], dtype=self._dtypes[1]))
        elif self._index == 2:
            # NOTE(review): indices 2-3 repeat the dtype pairing of
            # indices 0-1, so only two of the four (dtype x shape)
            # combinations described above are actually distinct —
            # confirm against the test that drives this model.
            out_tensor_0 = pb_utils.Tensor(
                "OUTPUT0", np.array(['123456'], dtype=self._dtypes[0]))
        elif self._index == 3:
            out_tensor_0 = pb_utils.Tensor(
                "OUTPUT0", np.array([], dtype=self._dtypes[1]))
        # A fifth request would match no branch and leave out_tensor_0
        # unbound; presumably at most four requests are ever sent.
        self._index += 1
        responses.append(pb_utils.InferenceResponse([out_tensor_0]))
    return responses

0 comments on commit e4e25d1

Please sign in to comment.