Commit

Merge pull request #112 from ImperialCollegeLondon/feature/vr_run
Draft high-level function to set up and run simulation
jacobcook1995 authored Nov 17, 2022
2 parents 2e8a551 + 1c83dd3 commit 4abb60f
Showing 10 changed files with 705 additions and 328 deletions.
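
The new tests in this commit exercise the entry point roughly as follows. This is a minimal usage sketch inferred from the calls in tests/test_main.py below; the fixture path and output arguments are copied from the tests and should be read as illustrative, not as a definitive description of vr_run's behaviour.

# Sketch based on the calls made in tests/test_main.py below: vr_run takes a
# configuration file path, an output folder and an output file name, validates
# the configuration, selects the requested models from the registry, configures
# them, and raises InitialisationError if any step fails.
from virtual_rainforest.main import vr_run

vr_run("tests/fixtures/all_config.toml", ".", "delete_me")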
454 changes: 214 additions & 240 deletions poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -37,7 +37,7 @@ hypothesis = "^6.54.2"
[tool.poetry.group.devenv.dependencies]
black = "^22.6.0"
flake8 = "^4.0.1"
mypy = "^0.981"
mypy = "^0.991"
pre-commit = "^2.19.0"
isort = "^5.10.1"
mdformat = "^0.7.14"
33 changes: 24 additions & 9 deletions tests/test_config.py
@@ -7,7 +7,7 @@
"""

from contextlib import nullcontext as does_not_raise
from logging import CRITICAL, INFO
from logging import CRITICAL, ERROR, INFO
from pathlib import Path

import pytest
@@ -47,7 +47,7 @@ def test_check_outfile(caplog, mocker):
mock_content.return_value = [Path(f"{file_name}.toml")]

# Check that check_outfile fails as expected
with pytest.raises(OSError):
with pytest.raises(config.ConfigurationError):
config.check_outfile(".", file_name)

expected_log_entries = (
@@ -366,14 +366,21 @@ def test_extend_with_default():
(),
),
(
{"basybuedb"},
{"core": {"grid": {"nx": -125, "ny": -10}}},
None,
pytest.raises(config.ConfigurationError),
(
(
ERROR,
"[core][grid][nx]: -125 is less than or equal to the minimum of 0",
),
(
ERROR,
"[core][grid][ny]: -10 is less than or equal to the minimum of 0",
),
(
CRITICAL,
"Validation of core configuration files failed: {'basybuedb'} is "
"not of type 'object'",
"Validation of core configuration files failed see above errors",
),
),
),
@@ -428,14 +435,22 @@ def test_missing_core_schema(caplog, mocker):
(),
),
(
{"basybuedb"},
{"soil": {"no_layers": -1}},
None,
pytest.raises(config.ConfigurationError),
(
(
ERROR,
"[plants]: 'ftypes' is a required property",
),
(
ERROR,
"[soil][no_layers]: -1 is less than or equal to the minimum of 0",
),
(
CRITICAL,
"Validation of configuration files failed: {'basybuedb'} is not of"
" type 'object'",
"Validation of complete configuration files failed see above "
"errors",
),
),
),
@@ -446,7 +461,7 @@ def test_validate_with_defaults(
):
"""Test that addition of defaults values during configuration works as desired."""

comb_schema = config.construct_combined_schema(["core", "plants"])
comb_schema = config.construct_combined_schema(["core", "plants", "soil"])

# Check that find_schema fails as expected
with raises:
217 changes: 217 additions & 0 deletions tests/test_main.py
@@ -0,0 +1,217 @@
"""Test module for main.py (and associated functionality).
This module tests both the main simulation function `vr_run` and the other functions
defined in main.py that it calls.
"""

from contextlib import nullcontext as does_not_raise
from logging import CRITICAL, ERROR, INFO

import pytest

from virtual_rainforest.core.model import BaseModel, InitialisationError
from virtual_rainforest.main import configure_models, select_models, vr_run

from .conftest import log_check


@pytest.mark.parametrize(
"model_list,no_models,raises,expected_log_entries",
[
(
["soil"], # valid input
1,
does_not_raise(),
(
(
INFO,
"Attempting to configure the following models: ['soil']",
),
),
),
(
["soil", "core"],
1,
does_not_raise(),
(
(
INFO,
"Attempting to configure the following models: ['soil']",
),
),
),
(
["soil", "freshwater"], # Model that hasn't been defined
0,
pytest.raises(InitialisationError),
(
(
INFO,
"Attempting to configure the following models: ['freshwater', "
"'soil']",
),
(
CRITICAL,
"The following models cannot be configured as they are not found in"
" the registry: ['freshwater']",
),
),
),
],
)
def test_select_models(caplog, model_list, no_models, raises, expected_log_entries):
"""Test the model selecting function."""

with raises:
models = select_models(model_list)
assert len(models) == no_models
assert all([type(model) == type(BaseModel) for model in models])

log_check(caplog, expected_log_entries)


@pytest.mark.parametrize(
"config,output,raises,expected_log_entries",
[
(
{ # valid config
"soil": {"no_layers": 1},
"core": {"timing": {"min_time_step": "7 days"}},
},
"SoilModel(update_interval = 10080 minutes, no_layers = 1)",
does_not_raise(),
(
(INFO, "Attempting to configure the following models: ['soil']"),
(
INFO,
"Information required to initialise the soil model successfully "
"extracted.",
),
),
),
(
{ # invalid soil config tag
"soil": {"no_layers": -1},
"core": {"timing": {"min_time_step": "7 days"}},
},
None,
pytest.raises(InitialisationError),
(
(INFO, "Attempting to configure the following models: ['soil']"),
(
INFO,
"Information required to initialise the soil model successfully "
"extracted.",
),
(
CRITICAL,
"There has to be at least one soil layer in the soil model!",
),
(
CRITICAL,
"Could not configure all the desired models, ending the "
"simulation.",
),
),
),
(
{ # min_time_step missing units
"soil": {"no_layers": 1},
"core": {"timing": {"min_time_step": "7"}},
},
None,
pytest.raises(InitialisationError),
(
(INFO, "Attempting to configure the following models: ['soil']"),
(
ERROR,
"Configuration types appear not to have been properly validated. "
"This problem prevents initialisation of the soil model. The first "
"instance of this problem is as follows: Cannot convert from "
"'dimensionless' (dimensionless) to 'minute' ([time])",
),
(
CRITICAL,
"Could not configure all the desired models, ending the "
"simulation.",
),
),
),
],
)
def test_configure_models(caplog, config, output, raises, expected_log_entries):
"""Test the function that configures the models."""

with raises:
model_list = select_models(["soil"])

models = configure_models(config, model_list)

if output is None:
assert models == [None]
else:
assert repr(models[0]) == output

log_check(caplog, expected_log_entries)


def test_vr_run_miss_model(mocker, caplog):
"""Test the main `vr_run` function handles missing models correctly."""

mock_conf = mocker.patch("virtual_rainforest.main.validate_config")
mock_conf.return_value = {"core": {"modules": ["topsoil"]}}

with pytest.raises(InitialisationError):
vr_run("tests/fixtures/all_config.toml", ".", "delete_me")

expected_log_entries = (
(INFO, "Attempting to configure the following models: ['topsoil']"),
(
CRITICAL,
"The following models cannot be configured as they are not found in the "
"registry: ['topsoil']",
),
)

log_check(caplog, expected_log_entries)


def test_vr_run_bad_model(mocker, caplog):
"""Test the main `vr_run` function handles bad model configuration correctly."""

mock_conf = mocker.patch("virtual_rainforest.main.validate_config")
mock_conf.return_value = {
"core": {
"modules": ["soil"],
"timing": {
"start_date": "2020-01-01",
"end_date": "2120-01-01",
"min_time_step": "0.5 martian days",
},
},
"soil": {},
}

with pytest.raises(InitialisationError):
vr_run("tests/fixtures/all_config.toml", ".", "delete_me")

expected_log_entries = (
(INFO, "Attempting to configure the following models: ['soil']"),
(
INFO,
"All models found in the registry, now attempting to configure them.",
),
(
ERROR,
"Configuration types appear not to have been properly validated. This "
"problem prevents initialisation of the soil model. The first instance of "
"this problem is as follows: 'martian' is not defined in the unit registry",
),
(
CRITICAL,
"Could not configure all the desired models, ending the simulation. The "
"following models failed: ['soil'].",
),
)

log_check(caplog, expected_log_entries)
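
Taken together, the tests above imply the following rough shape for the new helper functions in virtual_rainforest/main.py. This is a reconstruction from the test calls and asserted log messages only, not the implementation merged here; the local variable names are assumptions, while the function names, arguments and the quoted repr come directly from the diff.

# Hypothetical usage sketch reconstructed from tests/test_main.py above.
from virtual_rainforest.main import select_models, configure_models

config = {
    "soil": {"no_layers": 1},
    "core": {"timing": {"min_time_step": "7 days"}},
}

# select_models returns the registered model classes for the requested modules,
# raising InitialisationError if any name is missing from the registry.
model_classes = select_models(["soil"])

# configure_models instantiates each selected model from the validated config;
# test_configure_models expects repr(models[0]) to equal
# "SoilModel(update_interval = 10080 minutes, no_layers = 1)" for this config.
models = configure_models(config, model_classes)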