From 264b9e4700667e895c272f8da6a5a43d0c50d865 Mon Sep 17 00:00:00 2001
From: YunLiu <55491388+KumoLiu@users.noreply.github.com>
Date: Mon, 1 Apr 2024 18:09:35 +0800
Subject: [PATCH] Add "properties_path" in BundleWorkflow (#7542)

Fixes #7541

### Types of changes

- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/` folder.

---------

Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 monai/bundle/workflows.py                   | 23 +++++--
 tests/test_bundle_workflow.py               | 10 +++
 tests/test_regularization.py                | 16 +++++
 tests/testing_data/fl_infer_properties.json | 67 +++++++++++++++++++++
 4 files changed, 112 insertions(+), 4 deletions(-)
 create mode 100644 tests/testing_data/fl_infer_properties.json

diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py
index 804b3b06f0..d876f6d7ae 100644
--- a/monai/bundle/workflows.py
+++ b/monai/bundle/workflows.py
@@ -11,6 +11,7 @@

 from __future__ import annotations

+import json
 import os
 import sys
 import time
@@ -24,6 +25,7 @@
 from monai.bundle.config_parser import ConfigParser
 from monai.bundle.properties import InferProperties, MetaProperties, TrainProperties
 from monai.bundle.utils import DEFAULT_EXP_MGMT_SETTINGS, EXPR_KEY, ID_REF_KEY, ID_SEP_KEY
+from monai.config import PathLike
 from monai.utils import BundleProperty, BundlePropertyConfig, deprecated_arg, deprecated_arg_default, ensure_tuple

 __all__ = ["BundleWorkflow", "ConfigWorkflow"]
@@ -46,6 +48,7 @@ class BundleWorkflow(ABC):
             or "infer", "inference", "eval", "evaluation" for a inference workflow,
             other unsupported string will raise a ValueError.
             default to `None` for common workflow.
+        properties_path: the path to the JSON file of properties.
         meta_file: filepath of the metadata file, if this is a list of file paths, their contents will be merged in order.
         logging_file: config file for `logging` module in the program. for more details:
             https://docs.python.org/3/library/logging.config.html#logging.config.fileConfig.
@@ -66,6 +69,7 @@ def __init__(
         self,
         workflow_type: str | None = None,
         workflow: str | None = None,
+        properties_path: PathLike | None = None,
         meta_file: str | Sequence[str] | None = None,
         logging_file: str | None = None,
     ):
@@ -92,15 +96,24 @@ def __init__(
             meta_file = None

         workflow_type = workflow if workflow is not None else workflow_type
-        if workflow_type is None:
+        if workflow_type is None and properties_path is None:
             self.properties = copy(MetaProperties)
             self.workflow_type = None
             self.meta_file = meta_file
             return
-        if workflow_type.lower() in self.supported_train_type:
+        if properties_path is not None:
+            properties_path = Path(properties_path)
+            if not properties_path.is_file():
+                raise ValueError(f"Property file {properties_path} does not exist.")
+            with open(properties_path) as json_file:
+                self.properties = json.load(json_file)
+            self.workflow_type = None
+            self.meta_file = meta_file
+            return
+        if workflow_type.lower() in self.supported_train_type:  # type: ignore[union-attr]
             self.properties = {**TrainProperties, **MetaProperties}
             self.workflow_type = "train"
-        elif workflow_type.lower() in self.supported_infer_type:
+        elif workflow_type.lower() in self.supported_infer_type:  # type: ignore[union-attr]
             self.properties = {**InferProperties, **MetaProperties}
             self.workflow_type = "infer"
         else:
@@ -247,6 +260,7 @@ class ConfigWorkflow(BundleWorkflow):
             or "infer", "inference", "eval", "evaluation" for a inference workflow,
             other unsupported string will raise a ValueError.
             default to `None` for common workflow.
+        properties_path: the path to the JSON file of properties.
         override: id-value pairs to override or add the corresponding config content.
             e.g. ``--net#input_chns 42``, ``--net %/data/other.json#net_arg``

@@ -271,6 +285,7 @@ def __init__(
         tracking: str | dict | None = None,
         workflow_type: str | None = None,
         workflow: str | None = None,
+        properties_path: PathLike | None = None,
         **override: Any,
     ) -> None:
         workflow_type = workflow if workflow is not None else workflow_type
@@ -289,7 +304,7 @@ def __init__(
         else:
             config_root_path = Path("configs")
         meta_file = str(config_root_path / "metadata.json") if meta_file is None else meta_file
-        super().__init__(workflow_type=workflow_type, meta_file=meta_file)
+        super().__init__(workflow_type=workflow_type, meta_file=meta_file, properties_path=properties_path)
         self.config_root_path = config_root_path
         logging_file = str(self.config_root_path / "logging.conf") if logging_file is None else logging_file
         if logging_file is not None:
diff --git a/tests/test_bundle_workflow.py b/tests/test_bundle_workflow.py
index 0b0d51cbfb..9a276b577f 100644
--- a/tests/test_bundle_workflow.py
+++ b/tests/test_bundle_workflow.py
@@ -105,6 +105,16 @@ def test_inference_config(self, config_file):
         )
         self._test_inferer(inferer)

+        # test property path
+        inferer = ConfigWorkflow(
+            config_file=config_file,
+            properties_path=os.path.join(os.path.dirname(__file__), "testing_data", "fl_infer_properties.json"),
+            logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"),
+            **override,
+        )
+        self._test_inferer(inferer)
+        self.assertEqual(inferer.workflow_type, None)
+
     @parameterized.expand([TEST_CASE_3])
     def test_train_config(self, config_file):
         # test standard MONAI model-zoo config workflow
diff --git a/tests/test_regularization.py b/tests/test_regularization.py
index 22faf1027d..c6f727cb54 100644
--- a/tests/test_regularization.py
+++ b/tests/test_regularization.py
@@ -16,9 +16,15 @@
 import torch

 from monai.transforms import CutMix, CutMixd, CutOut, MixUp, MixUpd
+from monai.utils import set_determinism


 class TestMixup(unittest.TestCase):
+    def setUp(self) -> None:
+        set_determinism(seed=0)
+
+    def tearDown(self) -> None:
+        set_determinism(None)

     def test_mixup(self):
         for dims in [2, 3]:
@@ -53,6 +59,11 @@ def test_mixupd(self):


 class TestCutMix(unittest.TestCase):
+    def setUp(self) -> None:
+        set_determinism(seed=0)
+
+    def tearDown(self) -> None:
+        set_determinism(None)

     def test_cutmix(self):
         for dims in [2, 3]:
@@ -78,6 +89,11 @@ def test_cutmixd(self):


 class TestCutOut(unittest.TestCase):
+    def setUp(self) -> None:
+        set_determinism(seed=0)
+
+    def tearDown(self) -> None:
+        set_determinism(None)

     def test_cutout(self):
         for dims in [2, 3]:
diff --git a/tests/testing_data/fl_infer_properties.json b/tests/testing_data/fl_infer_properties.json
new file mode 100644
index 0000000000..72e97cd2c6
--- /dev/null
+++ b/tests/testing_data/fl_infer_properties.json
@@ -0,0 +1,67 @@
+{
+    "bundle_root": {
+        "description": "root path of the bundle.",
+        "required": true,
+        "id": "bundle_root"
+    },
+    "device": {
+        "description": "target device to execute the bundle workflow.",
+        "required": true,
+        "id": "device"
+    },
+    "dataset_dir": {
+        "description": "directory path of the dataset.",
+        "required": true,
+        "id": "dataset_dir"
+    },
+    "dataset": {
+        "description": "PyTorch dataset object for the inference / evaluation logic.",
+        "required": true,
+        "id": "dataset"
+    },
+    "evaluator": {
+        "description": "inference / evaluation workflow engine.",
+        "required": true,
+        "id": "evaluator"
+    },
+    "network_def": {
+        "description": "network module for the inference.",
+        "required": true,
+        "id": "network_def"
+    },
+    "inferer": {
+        "description": "MONAI Inferer object to execute the model computation in inference.",
+        "required": true,
+        "id": "inferer"
+    },
+    "dataset_data": {
+        "description": "data source for the inference / evaluation dataset.",
+        "required": false,
+        "id": "dataset::data",
+        "refer_id": null
+    },
+    "handlers": {
+        "description": "event-handlers for the inference / evaluation logic.",
+        "required": false,
+        "id": "handlers",
+        "refer_id": "evaluator::val_handlers"
+    },
+    "preprocessing": {
+        "description": "preprocessing for the input data.",
+        "required": false,
+        "id": "preprocessing",
+        "refer_id": "dataset::transform"
+    },
+    "postprocessing": {
+        "description": "postprocessing for the model output data.",
+        "required": false,
+        "id": "postprocessing",
+        "refer_id": "evaluator::postprocessing"
+    },
+    "key_metric": {
+        "description": "the key metric during evaluation.",
+        "required": false,
+        "id": "key_metric",
+        "refer_id": "evaluator::key_val_metric"
+    }
+}
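For reference, a minimal sketch of how the new `properties_path` argument can be used from user code, mirroring the test added above. The bundle directory and file names below are placeholders, and the properties file is assumed to follow the same schema as `fl_infer_properties.json`:

```python
from monai.bundle import ConfigWorkflow

# Placeholder paths: point these at a real bundle config and a custom
# properties JSON shaped like tests/testing_data/fl_infer_properties.json.
workflow = ConfigWorkflow(
    config_file="my_bundle/configs/inference.json",
    logging_file="my_bundle/configs/logging.conf",
    properties_path="my_bundle/configs/infer_properties.json",
)

# When a properties file is supplied, the built-in train/infer property sets
# are bypassed and no workflow type is recorded.
assert workflow.workflow_type is None

workflow.initialize()
missing = workflow.check_properties()  # properties not satisfied by the config, if any
workflow.run()
workflow.finalize()
```

Because the `properties_path` branch returns before `workflow_type` is consulted, the required-property checks come entirely from the JSON file rather than from `TrainProperties`/`InferProperties`.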