Skip to content

Commit f0d8022

Browse files
authored
Add model run name for model.run (#435)
* Add model run name for model.run
* Fix test
* Remove epoch
* Reduce assert payload size
1 parent 4139951 commit f0d8022

File tree

5 files changed

+19
-41
lines changed

5 files changed

+19
-41
lines changed

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,12 @@ All notable changes to the [Nucleus Python Client](https://github.com/scaleapi/n
55
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
66
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
77

8+
## [0.17.4](https://github.com/scaleapi/nucleus-python-client/releases/tag/v0.17.4) - 2024-03-25
9+
10+
### Modified
11+
- In `Model.run`, added the `model_run_name` parameter. This allows the creation of multiple model runs for datasets.
12+
13+
814
## [0.17.3] - 2024-02-29
915

1016
### Added

nucleus/model.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -211,7 +211,9 @@ def evaluate(self, scenario_test_names: List[str]) -> AsyncJob:
211211
)
212212
return AsyncJob.from_json(response, self._client)
213213

214-
def run(self, dataset_id: str, slice_id: Optional[str]) -> str:
214+
def run(
215+
self, dataset_id: str, model_run_name: str, slice_id: Optional[str]
216+
) -> str:
215217
"""Runs inference on the bundle associated with the model on the dataset. ::
216218
217219
import nucleus
@@ -222,11 +224,18 @@ def run(self, dataset_id: str, slice_id: Optional[str]) -> str:
222224
223225
Args:
224226
dataset_id: The ID of the dataset to run inference on.
225-
job_id: The ID of the :class:`AsyncJob` used to track job progress.
227+
model_run_name: The name of the model run.
226228
slice_id: The ID of the slice of the dataset to run inference on.
229+
230+
Returns:
231+
job_id: The ID of the :class:`AsyncJob` used to track job progress.
227232
"""
228233
response = self._client.make_request(
229-
{"dataset_id": dataset_id, "slice_id": slice_id},
234+
{
235+
"dataset_id": dataset_id,
236+
"slice_id": slice_id,
237+
"model_run_name": model_run_name,
238+
},
230239
f"model/run/{self.id}/",
231240
requests_command=requests.post,
232241
)

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ ignore = ["E501", "E741", "E731", "F401"] # Easy ignore for getting it running
2525

2626
[tool.poetry]
2727
name = "scale-nucleus"
28-
version = "0.17.3"
28+
version = "0.17.4"
2929
description = "The official Python client library for Nucleus, the Data Platform for AI"
3030
license = "MIT"
3131
authors = ["Scale AI Nucleus Team <nucleusapi@scaleapi.com>"]

tests/test_annotation.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -824,7 +824,6 @@ def test_default_category_gt_upload_async(dataset):
824824
"status": "Completed",
825825
"message": {
826826
"annotation_upload": {
827-
"epoch": 1,
828827
"total": 1,
829828
"errored": 0,
830829
"ignored": 0,

tests/test_dataset.py

Lines changed: 0 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -380,24 +380,6 @@ def test_annotate_async(dataset: Dataset):
380380
expected = {
381381
"job_id": job.job_id,
382382
"status": "Completed",
383-
"message": {
384-
"annotation_upload": {
385-
"epoch": 1,
386-
"total": 4,
387-
"errored": 0,
388-
"ignored": 0,
389-
"datasetId": dataset.id,
390-
"processed": 4,
391-
},
392-
"segmentation_upload": {
393-
"ignored": 0,
394-
"n_errors": 0,
395-
"processed": 1,
396-
},
397-
},
398-
"job_progress": "1.00",
399-
"completed_steps": 5,
400-
"total_steps": 5,
401383
}
402384
assert_partial_equality(expected, status)
403385

@@ -423,24 +405,6 @@ def test_annotate_async_with_error(dataset: Dataset):
423405
expected = {
424406
"job_id": job.job_id,
425407
"status": "Completed",
426-
"message": {
427-
"annotation_upload": {
428-
"epoch": 1,
429-
"total": 4,
430-
"errored": 1,
431-
"ignored": 0,
432-
"datasetId": dataset.id,
433-
"processed": 3,
434-
},
435-
"segmentation_upload": {
436-
"ignored": 0,
437-
"n_errors": 0,
438-
"processed": 1,
439-
},
440-
},
441-
"job_progress": "1.00",
442-
"completed_steps": 5,
443-
"total_steps": 5,
444408
}
445409
assert_partial_equality(expected, status)
446410

0 commit comments

Comments (0)