
Commit b6cfe24

Auto-generated API code

1 parent: d1b99c8

2 files changed: 8 additions, 0 deletions

elasticsearch/_async/client/ml.py

Lines changed: 4 additions & 0 deletions
@@ -3582,6 +3582,7 @@ async def start_trained_model_deployment(
         *,
         model_id: str,
         cache_size: t.Optional[t.Union[int, str]] = None,
+        deployment_id: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -3605,6 +3606,7 @@ async def start_trained_model_deployment(
         :param cache_size: The inference cache size (in memory outside the JVM heap)
             per node for the model. The default value is the same size as the `model_size_bytes`.
             To disable the cache, `0b` can be provided.
+        :param deployment_id: A unique identifier for the deployment of the model.
         :param number_of_allocations: The number of model allocations on each node where
             the model is deployed. All allocations on a node share the same copy of the
             model in memory but use a separate set of threads to evaluate the model.
@@ -3631,6 +3633,8 @@ async def start_trained_model_deployment(
         __query: t.Dict[str, t.Any] = {}
         if cache_size is not None:
             __query["cache_size"] = cache_size
+        if deployment_id is not None:
+            __query["deployment_id"] = deployment_id
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
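
For context (not part of the commit), a minimal sketch of calling the updated async method with the new `deployment_id` parameter; the cluster URL, API key, model ID, and deployment ID are placeholder values:

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    # Placeholder connection details; substitute your own cluster and credentials.
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")

    # Start a named deployment of an already-imported trained model using the
    # `deployment_id` parameter added in this commit. Both IDs are hypothetical.
    resp = await client.ml.start_trained_model_deployment(
        model_id="my-trained-model",
        deployment_id="my-deployment",
    )
    print(resp)

    await client.close()


asyncio.run(main())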

elasticsearch/_sync/client/ml.py

Lines changed: 4 additions & 0 deletions
@@ -3582,6 +3582,7 @@ def start_trained_model_deployment(
         *,
         model_id: str,
         cache_size: t.Optional[t.Union[int, str]] = None,
+        deployment_id: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -3605,6 +3606,7 @@ def start_trained_model_deployment(
         :param cache_size: The inference cache size (in memory outside the JVM heap)
             per node for the model. The default value is the same size as the `model_size_bytes`.
             To disable the cache, `0b` can be provided.
+        :param deployment_id: A unique identifier for the deployment of the model.
         :param number_of_allocations: The number of model allocations on each node where
             the model is deployed. All allocations on a node share the same copy of the
             model in memory but use a separate set of threads to evaluate the model.
@@ -3631,6 +3633,8 @@ def start_trained_model_deployment(
         __query: t.Dict[str, t.Any] = {}
         if cache_size is not None:
             __query["cache_size"] = cache_size
+        if deployment_id is not None:
+            __query["deployment_id"] = deployment_id
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
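
The synchronous client mirrors the same signature; a corresponding sketch, again with placeholder connection details and hypothetical IDs:

from elasticsearch import Elasticsearch

# Placeholder connection details; substitute your own cluster and credentials.
client = Elasticsearch("https://localhost:9200", api_key="...")

# As the diff shows, `deployment_id` is only sent as a query parameter when
# it is not None, so omitting it preserves the previous behavior.
resp = client.ml.start_trained_model_deployment(
    model_id="my-trained-model",
    deployment_id="my-deployment",
)
print(resp)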
