Add test for sequence model instance update #5831

Merged
merged 22 commits into from
Jul 24, 2023
Changes from 1 commit
22 commits
7637d14
Add test for sequence model instance update
kthui May 19, 2023
8eda66f
Add gap for file timestamp update
kthui May 24, 2023
62bff4d
Update test for non-blocking sequence update
kthui Jun 27, 2023
e386e28
Update documentation
kthui Jun 27, 2023
46595ca
Remove mentioning increase instance count case
kthui Jun 28, 2023
1998f33
Add more documentation for scheduler update test
kthui Jul 13, 2023
37ab460
Update test for non-blocking batcher removal
kthui Jul 14, 2023
453d302
Add polling due to async scheduler destruction
kthui Jul 17, 2023
2b76ddb
Use _ as private
kthui Jul 18, 2023
0d3b784
Fix typo
kthui Jul 19, 2023
017a76c
Add docs on instance count decrease
kthui Jul 19, 2023
c8456ad
Fix typo
kthui Jul 19, 2023
c9d7b5f
Separate direct and oldest to different test cases
kthui Jul 20, 2023
dcb55f0
Separate nested tests in a loop into multiple test cases
kthui Jul 20, 2023
0268918
Refactor scheduler update test
kthui Jul 20, 2023
38b0ade
Improve doc on handling future test failures
kthui Jul 20, 2023
e05434d
Merge branch 'main' of github.com:triton-inference-server/server into…
kthui Jul 20, 2023
f3a9f75
Address pre-commit
kthui Jul 20, 2023
99f5935
Add best effort to reset model state after a single test case failure
kthui Jul 21, 2023
fae2e1a
Remove reset model method to make harder for chaining multiple test c…
kthui Jul 21, 2023
ded51b4
Remove description on model state clean up
kthui Jul 21, 2023
9e44efc
Merge branch 'main' of github.com:triton-inference-server/server into…
kthui Jul 21, 2023
Separate nested tests in a loop into multiple test cases
kthui committed Jul 20, 2023
commit dcb55f0c047a0bc7c88f1fde7693bcfc3a01b89b
62 changes: 39 additions & 23 deletions qa/L0_model_update/instance_update_test.py
@@ -145,29 +145,45 @@ def _unload_model(self, batching=False):
         with self.assertRaises(InferenceServerException):
             self._infer(batching)
 
-    # Test add -> remove -> add an instance
-    def test_add_rm_add_instance(self):
-        for batching in [False, True]:
-            self._load_model(3, batching=batching)
-            stop = self._concurrent_infer(batching=batching)
-            self._update_instance_count(1, 0, batching=batching)  # add
-            self._update_instance_count(0, 1, batching=batching)  # remove
-            self._update_instance_count(1, 0, batching=batching)  # add
-            stop()
-            self._unload_model(batching=batching)
-            self._reset_model()  # for next iteration
-
-    # Test remove -> add -> remove an instance
-    def test_rm_add_rm_instance(self):
-        for batching in [False, True]:
-            self._load_model(2, batching=batching)
-            stop = self._concurrent_infer(batching=batching)
-            self._update_instance_count(0, 1, batching=batching)  # remove
-            self._update_instance_count(1, 0, batching=batching)  # add
-            self._update_instance_count(0, 1, batching=batching)  # remove
-            stop()
-            self._unload_model(batching=batching)
-            self._reset_model()  # for next iteration
+    # Test add -> remove -> add an instance without batching
+    def test_add_rm_add_instance_no_batching(self):
+        self._load_model(3, batching=False)
+        stop = self._concurrent_infer(batching=False)
+        self._update_instance_count(1, 0, batching=False)  # add
+        self._update_instance_count(0, 1, batching=False)  # remove
+        self._update_instance_count(1, 0, batching=False)  # add
+        stop()
+        self._unload_model(batching=False)
+
+    # Test add -> remove -> add an instance with batching
+    def test_add_rm_add_instance_with_batching(self):
+        self._load_model(4, batching=True)
+        stop = self._concurrent_infer(batching=True)
+        self._update_instance_count(1, 0, batching=True)  # add
+        self._update_instance_count(0, 1, batching=True)  # remove
+        self._update_instance_count(1, 0, batching=True)  # add
+        stop()
+        self._unload_model(batching=True)
+
+    # Test remove -> add -> remove an instance without batching
+    def test_rm_add_rm_instance_no_batching(self):
+        self._load_model(2, batching=False)
+        stop = self._concurrent_infer(batching=False)
+        self._update_instance_count(0, 1, batching=False)  # remove
+        self._update_instance_count(1, 0, batching=False)  # add
+        self._update_instance_count(0, 1, batching=False)  # remove
+        stop()
+        self._unload_model(batching=False)
+
+    # Test remove -> add -> remove an instance with batching
+    def test_rm_add_rm_instance_with_batching(self):
+        self._load_model(3, batching=True)
+        stop = self._concurrent_infer(batching=True)
+        self._update_instance_count(0, 1, batching=True)  # remove
+        self._update_instance_count(1, 0, batching=True)  # add
+        self._update_instance_count(0, 1, batching=True)  # remove
+        stop()
+        self._unload_model(batching=True)
 
     # Test reduce instance count to zero
     def test_rm_instance_to_zero(self):
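For context, the refactor in this commit follows a standard unittest pattern: instead of driving both batching modes from a single test body with a for loop (which needs an explicit reset between iterations, as the removed _reset_model() call shows, and aborts at the first failing mode), each configuration becomes its own test case so failures are reported and isolated independently. The sketch below is a hypothetical, self-contained illustration of the two styles; it does not use the repository's helpers, and the class and method names are made up.

import unittest


class LoopedStyle(unittest.TestCase):
    # One test body iterates over the configurations; the first failing
    # iteration aborts the rest, and shared state must be reset manually.
    def test_update_all_modes(self):
        for batching in [False, True]:
            self._run_update_sequence(batching)
            self._reset_state()  # manual cleanup for the next iteration

    def _run_update_sequence(self, batching):
        pass  # stand-in for the load -> update -> unload steps

    def _reset_state(self):
        pass  # stand-in for the cleanup the loop needs between iterations


class SplitStyle(unittest.TestCase):
    # Each configuration is a separate test case, so results are isolated
    # and no cross-iteration cleanup is required.
    def test_update_no_batching(self):
        self._run_update_sequence(batching=False)

    def test_update_with_batching(self):
        self._run_update_sequence(batching=True)

    def _run_update_sequence(self, batching):
        pass  # stand-in for the load -> update -> unload steps


if __name__ == "__main__":
    unittest.main()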