Skip to content

Commit

Permalink
simple_sequence_client must use stream with async (triton-inference-s…
Browse files Browse the repository at this point in the history
  • Loading branch information
deadeyegoodwin authored Aug 23, 2019
1 parent 072566f commit fd5eca9
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 9 deletions.
18 changes: 13 additions & 5 deletions src/clients/c++/simple_sequence_client.cc
Original file line number Diff line number Diff line change
Expand Up @@ -236,14 +236,22 @@ main(int argc, char** argv)
const ni::CorrelationID correlation_id0 = 1;
const ni::CorrelationID correlation_id1 = 2;

// Create two different contexts, one is using streaming while the other
// isn't. Then we can compare their difference in sync/async runs
// Create two different contexts. In the sync case we can use one
// streaming context and one non-streaming context. In the async case
// we must use streaming for both, since async + non-streaming means
// that the order in which requests reach the inference server is not
// guaranteed.
err = nic::InferGrpcStreamContext::Create(
&ctx0, correlation_id0, url, model_name, -1 /* model_version */, verbose);
if (err.IsOk()) {
err = nic::InferGrpcContext::Create(
&ctx1, correlation_id1, url, model_name, -1 /* model_version */,
verbose);
if (async) {
err = nic::InferGrpcStreamContext::Create(
&ctx1, correlation_id1, url, model_name, -1 /* model_version */,
verbose);
} else {
err = nic::InferGrpcContext::Create(
&ctx1, correlation_id1, url, model_name, -1 /* model_version */,
verbose);
}
}

if (!err.IsOk()) {
Expand Down
12 changes: 8 additions & 4 deletions src/clients/python/simple_sequence_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,15 +95,19 @@ def async_receive(ctx, request_id):
# correlation ID.
values = [11, 7, 5, 3, 2, 0, 1]

# For the two different contexts, one is using streaming while the other
# isn't. Then we can compare their difference in sync/async runs
# Create two different contexts. In the sync case we can use one
# streaming context and one non-streaming context. In the async case
# we must use streaming for both, since async + non-streaming means
# that the order in which requests reach the inference server is not
# guaranteed.
correlation_id0 = 1000
ctx0 = InferContext(FLAGS.url, protocol, model_name, model_version,
correlation_id=correlation_id0, verbose=FLAGS.verbose, streaming=True)
correlation_id=correlation_id0, verbose=FLAGS.verbose,
streaming=True)

correlation_id1 = 1001
ctx1 = InferContext(FLAGS.url, protocol, model_name, model_version,
correlation_id=correlation_id1, verbose=FLAGS.verbose, streaming=False)
correlation_id=correlation_id1, verbose=FLAGS.verbose,
streaming=FLAGS.async_set)

# Now send the inference sequences..
ctxs = []
Expand Down

0 comments on commit fd5eca9

Please sign in to comment.