From b55ebad760052f0d509fa1aa60ab39770031aace Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Thu, 14 Dec 2023 15:40:13 +0100
Subject: [PATCH] Fix `flatten_timepoint_specific_output_overrides` not
 supporting observableParameter overrides as placeholders in noise formulae

Fixes #234

Also fixes the return type annotation of `unflatten_simulation_df`, makes
the test more readable, and uses more informative assertions.
---
 petab/core.py       | 35 ++++++++++++-------------
 tests/test_petab.py | 64 ++++++++++++++++++++++-----------------------
 2 files changed, 49 insertions(+), 50 deletions(-)

diff --git a/petab/core.py b/petab/core.py
index 05deb161..0e7b7dad 100644
--- a/petab/core.py
+++ b/petab/core.py
@@ -71,7 +71,7 @@ def write_simulation_df(df: pd.DataFrame, filename: Union[str, Path]) -> None:
 
 
 def get_visualization_df(
-        visualization_file: Union[str, Path, pd.DataFrame, None]
+    visualization_file: Union[str, Path, pd.DataFrame, None]
 ) -> Union[pd.DataFrame, None]:
     """Read PEtab visualization table
 
@@ -254,7 +254,7 @@ def flatten_timepoint_specific_output_overrides(
 
     Arguments:
         petab_problem:
-            PEtab problem to work on
+            PEtab problem to work on. Modified in place.
     """
     new_measurement_dfs = []
     new_observable_dfs = []
@@ -277,22 +277,21 @@ def flatten_timepoint_specific_output_overrides(
             for field, hyperparameter_type, target in [
                 (NOISE_PARAMETERS, "noiseParameter", NOISE_FORMULA),
                 (OBSERVABLE_PARAMETERS, "observableParameter", OBSERVABLE_FORMULA),
+                (OBSERVABLE_PARAMETERS, "observableParameter", NOISE_FORMULA),
             ]:
-                if field in measurements:
-                    hyperparameter_replacement_id = (
-                        get_hyperparameter_replacement_id(
-                            hyperparameter_type=hyperparameter_type,
-                            observable_replacement_id=observable_replacement_id,
-                        )
-                    )
-                    hyperparameter_id = mappings[field][
-                        hyperparameter_replacement_id
-                    ]
-                    observable[target] = re.sub(
-                        hyperparameter_id,
-                        hyperparameter_replacement_id,
-                        observable[target],
-                    )
+                if field not in measurements:
+                    continue
+
+                hyperparameter_replacement_id = get_hyperparameter_replacement_id(
+                    hyperparameter_type=hyperparameter_type,
+                    observable_replacement_id=observable_replacement_id,
+                )
+                hyperparameter_id = mappings[field][hyperparameter_replacement_id]
+                observable[target] = re.sub(
+                    hyperparameter_id,
+                    hyperparameter_replacement_id,
+                    observable[target],
+                )
 
             measurements[OBSERVABLE_ID] = observable_replacement_id
             new_measurement_dfs.append(measurements)
@@ -306,7 +305,7 @@
 def unflatten_simulation_df(
     simulation_df: pd.DataFrame,
     petab_problem: "petab.problem.Problem",
-) -> None:
+) -> pd.DataFrame:
     """Unflatten simulations from a flattened PEtab problem.
 
     A flattened PEtab problem is the output of applying
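[Reviewer note, not part of the patch] The extended loop above is the actual fix: previously, observableParameter placeholders were only renamed in the observable formula, so any occurrence of them in the noise formula survived flattening and kept referencing a placeholder that no longer exists. Below is a minimal, self-contained sketch of the renaming step, using identifiers from the test in this patch; the hard-coded `renames` dict stands in for `mappings[field]`, so this is a simplification, not the library's internal code.

import re

# Noise formula embedding observableParameter placeholders -- the case
# from #234 that the loop previously left untouched.
noise_formula = (
    "(observableParameter1_obs1 + observableParameter2_obs1)"
    " * noiseParameter1_obs1"
)

# Flattened observable ID for one measurement group (taken from the test).
replacement_id = "obs1__obsParOverride1_1_0__noiseParOverride1__condition1"

# Stand-in for `mappings[field]`: original placeholder -> flattened one.
renames = {
    "observableParameter1_obs1": f"observableParameter1_{replacement_id}",
    "observableParameter2_obs1": f"observableParameter2_{replacement_id}",
    "noiseParameter1_obs1": f"noiseParameter1_{replacement_id}",
}
for old_id, new_id in renames.items():
    noise_formula = re.sub(old_id, new_id, noise_formula)

print(noise_formula)
# (observableParameter1_obs1__obsParOverride1_1_0__noiseParOverride1__condition1
#  + observableParameter2_obs1__obsParOverride1_1_0__noiseParOverride1__condition1)
#  * noiseParameter1_obs1__obsParOverride1_1_0__noiseParOverride1__condition1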
diff --git a/tests/test_petab.py b/tests/test_petab.py
index 89053fb4..b8368805 100644
--- a/tests/test_petab.py
+++ b/tests/test_petab.py
@@ -353,36 +353,32 @@ def test_flatten_timepoint_specific_output_overrides():
             OBSERVABLE_FORMULA: [
                 "observableParameter1_obs1 + observableParameter2_obs1"
             ],
-            NOISE_FORMULA: ["noiseParameter1_obs1"],
+            NOISE_FORMULA: [
+                "(observableParameter1_obs1 + observableParameter2_obs1) * noiseParameter1_obs1"
+            ],
         }
     )
     observable_df.set_index(OBSERVABLE_ID, inplace=True)
 
+    # new observable IDs
+    # (obs${i_obs}_${i_obsParOverride}_${i_noiseParOverride}_${i_condition})
+    obs1_1_1_1 = "obs1__obsParOverride1_1_0__noiseParOverride1__condition1"
+    obs1_2_1_1 = "obs1__obsParOverride2_1_0__noiseParOverride1__condition1"
+    obs1_2_2_1 = "obs1__obsParOverride2_1_0__noiseParOverride2__condition1"
     observable_df_expected = pd.DataFrame(
         data={
-            OBSERVABLE_ID: [
-                "obs1__obsParOverride1_1_0__noiseParOverride1__condition1",
-                "obs1__obsParOverride2_1_0__noiseParOverride1__condition1",
-                "obs1__obsParOverride2_1_0__noiseParOverride2__condition1",
-            ],
+            OBSERVABLE_ID: [obs1_1_1_1, obs1_2_1_1, obs1_2_2_1],
             OBSERVABLE_FORMULA: [
-                "observableParameter1_obs1__obsParOverride1_1_0__"
-                "noiseParOverride1__condition1 + observableParameter2_obs1"
-                "__obsParOverride1_1_0__noiseParOverride1__condition1",
-                "observableParameter1_obs1__obsParOverride2_1_0__noiseParOverride1"
-                "__condition1 + observableParameter2_obs1__obsParOverride2_1_0"
-                "__noiseParOverride1__condition1",
-                "observableParameter1_obs1__obsParOverride2_1_0"
-                "__noiseParOverride2__condition1 + observableParameter2_obs1__"
-                "obsParOverride2_1_0__noiseParOverride2__condition1",
+                f"observableParameter1_{obs1_1_1_1} + observableParameter2_{obs1_1_1_1}",
+                f"observableParameter1_{obs1_2_1_1} + observableParameter2_{obs1_2_1_1}",
+                f"observableParameter1_{obs1_2_2_1} + observableParameter2_{obs1_2_2_1}",
             ],
             NOISE_FORMULA: [
-                "noiseParameter1_obs1__obsParOverride1_1_0__"
-                "noiseParOverride1__condition1",
-                "noiseParameter1_obs1__obsParOverride2_1_0__"
-                "noiseParOverride1__condition1",
-                "noiseParameter1_obs1__obsParOverride2_1_0__"
-                "noiseParOverride2__condition1",
+                f"(observableParameter1_{obs1_1_1_1} + observableParameter2_{obs1_1_1_1})"
+                f" * noiseParameter1_{obs1_1_1_1}",
+                f"(observableParameter1_{obs1_2_1_1} + observableParameter2_{obs1_2_1_1})"
+                f" * noiseParameter1_{obs1_2_1_1}",
+                f"(observableParameter1_{obs1_2_2_1} + observableParameter2_{obs1_2_2_1})"
+                f" * noiseParameter1_{obs1_2_2_1}",
             ],
         }
     )
@@ -418,12 +414,7 @@ def test_flatten_timepoint_specific_output_overrides():
 
     measurement_df_expected = pd.DataFrame(
         data={
-            OBSERVABLE_ID: [
-                "obs1__obsParOverride1_1_0__noiseParOverride1__condition1",
-                "obs1__obsParOverride2_1_0__noiseParOverride1__condition1",
-                "obs1__obsParOverride2_1_0__noiseParOverride2__condition1",
-                "obs1__obsParOverride2_1_0__noiseParOverride2__condition1",
-            ],
+            OBSERVABLE_ID: [obs1_1_1_1, obs1_2_1_1, obs1_2_2_1, obs1_2_2_1],
             SIMULATION_CONDITION_ID: [
                 "condition1",
                 "condition1",
@@ -472,8 +463,12 @@ def test_flatten_timepoint_specific_output_overrides():
         is False
     )
-    assert problem.observable_df.equals(observable_df_expected) is True
-    assert problem.measurement_df.equals(measurement_df_expected) is True
+    pd.testing.assert_frame_equal(
+        problem.observable_df, observable_df_expected
+    )
+    pd.testing.assert_frame_equal(
+        problem.measurement_df, measurement_df_expected
+    )
 
     assert petab.lint_problem(problem) is False
 
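[Reviewer note, not part of the patch] On the assertion change: `df.equals(...)` collapses the comparison to a bare True/False, so a failing test reports nothing about where the frames diverge, while `pd.testing.assert_frame_equal` names the offending column and values. A standalone illustration:

import pandas as pd

expected = pd.DataFrame({"observableId": ["obs1_a"], "time": [1.0]})
actual = pd.DataFrame({"observableId": ["obs1_b"], "time": [1.0]})

# Old style: only reveals *that* the frames differ.
assert actual.equals(expected) is False

# New style: the AssertionError message pinpoints the difference.
try:
    pd.testing.assert_frame_equal(actual, expected)
except AssertionError as err:
    print(err)  # e.g. 'DataFrame.iloc[:, 0] (column name="observableId") ... different'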
@@ -591,8 +586,12 @@ def test_flatten_timepoint_specific_output_overrides_special_cases():
         is False
     )
-    assert problem.observable_df.equals(observable_df_expected) is True
-    assert problem.measurement_df.equals(measurement_df_expected) is True
+    pd.testing.assert_frame_equal(
+        problem.observable_df, observable_df_expected
+    )
+    pd.testing.assert_frame_equal(
+        problem.measurement_df, measurement_df_expected
+    )
 
     assert petab.lint_problem(problem) is False
 
 
@@ -842,13 +841,14 @@ def test_get_required_parameters_for_parameter_table(petab_problem):
     # as part of the proportional error model.
     assert "observableParameter1_obs1" in noise_placeholders
 
-    required_parameters_for_parameter_table = \
+    required_parameters_for_parameter_table = (
         petab.parameters.get_required_parameters_for_parameter_table(
             model=petab_problem.model,
             condition_df=petab_problem.condition_df,
             observable_df=petab_problem.observable_df,
             measurement_df=petab_problem.measurement_df,
         )
+    )
 
     # The observable parameter is correctly recognized as a placeholder,
     # i.e. does not need to be in the parameter table.
     assert (
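[Reviewer note, not part of the patch] Putting it together, the contract these tests exercise: flattening rewrites timepoint-specific overrides into one observable per override group, after which the measurement table must be free of such mappings and the problem must lint clean. A hedged usage sketch; it assumes `problem` is a `petab.Problem` already assembled from the tables in the test, and that the helper lives at `petab.lint.measurement_table_has_timepoint_specific_mappings`, as the tests' `is False` assertions suggest:

import petab

# Assumption: `problem` was built from the measurement/observable/parameter
# tables shown in the test above.
petab.flatten_timepoint_specific_output_overrides(problem)  # modifies in place

# No timepoint-specific overrides may survive flattening.
assert not petab.lint.measurement_table_has_timepoint_specific_mappings(
    problem.measurement_df
)

# lint_problem returns True if errors were found, so `is False` means the
# flattened problem is valid.
assert petab.lint_problem(problem) is False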