1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -44,6 +44,7 @@ Attention: The newest changes should be on top -->

### Changed

- TST: remove remaining files after test session. [#862](https://github.com/RocketPy-Team/RocketPy/pull/862)
- MNT: bumps min python version to 3.10 [#857](https://github.com/RocketPy-Team/RocketPy/pull/857)
- DOC: Update docs dependencies and sub dependencies [#851](https://github.com/RocketPy-Team/RocketPy/pull/851)
- MNT: extract flight data exporters [#845](https://github.com/RocketPy-Team/RocketPy/pull/845)
2 changes: 1 addition & 1 deletion rocketpy/plots/monte_carlo_plots.py
@@ -1,6 +1,6 @@
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
import numpy as np
from matplotlib.transforms import offset_copy

from ..tools import generate_monte_carlo_ellipses, import_optional_dependency

229 changes: 129 additions & 100 deletions tests/integration/simulation/test_monte_carlo.py
@@ -9,6 +9,18 @@
plt.rcParams.update({"figure.max_open_warning": 0})


def _post_test_file_cleanup():
"""Clean monte carlo files after test session if they exist."""
if os.path.exists("monte_carlo_class_example.kml"):
os.remove("monte_carlo_class_example.kml")
if os.path.exists("monte_carlo_test.errors.txt"):
os.remove("monte_carlo_test.errors.txt")
if os.path.exists("monte_carlo_test.inputs.txt"):
os.remove("monte_carlo_test.inputs.txt")
if os.path.exists("monte_carlo_test.outputs.txt"):
os.remove("monte_carlo_test.outputs.txt")


@pytest.mark.slow
@pytest.mark.parametrize("parallel", [False, True])
def test_monte_carlo_simulate(monte_carlo_calisto, parallel):
@@ -19,27 +31,29 @@ def test_monte_carlo_simulate(monte_carlo_calisto, parallel):
monte_carlo_calisto : MonteCarlo
The MonteCarlo object, this is a pytest fixture.
"""
# NOTE: this is really slow, it runs 10 flight simulations
monte_carlo_calisto.simulate(
number_of_simulations=10, append=False, parallel=parallel
)

assert monte_carlo_calisto.num_of_loaded_sims == 10
assert monte_carlo_calisto.number_of_simulations == 10
assert str(monte_carlo_calisto.filename.name) == "monte_carlo_test"
assert str(monte_carlo_calisto.error_file.name) == "monte_carlo_test.errors.txt"
assert str(monte_carlo_calisto.output_file.name) == "monte_carlo_test.outputs.txt"
assert np.isclose(
monte_carlo_calisto.processed_results["apogee"][0], 4711, rtol=0.2
)
assert np.isclose(
monte_carlo_calisto.processed_results["impact_velocity"][0],
-5.234,
rtol=0.2,
)
os.remove("monte_carlo_test.errors.txt")
os.remove("monte_carlo_test.outputs.txt")
os.remove("monte_carlo_test.inputs.txt")
try:
# NOTE: this is really slow, it runs 10 flight simulations
monte_carlo_calisto.simulate(
number_of_simulations=10, append=False, parallel=parallel
)

assert monte_carlo_calisto.num_of_loaded_sims == 10
assert monte_carlo_calisto.number_of_simulations == 10
assert str(monte_carlo_calisto.filename.name) == "monte_carlo_test"
assert str(monte_carlo_calisto.error_file.name) == "monte_carlo_test.errors.txt"
assert (
str(monte_carlo_calisto.output_file.name) == "monte_carlo_test.outputs.txt"
)
assert np.isclose(
monte_carlo_calisto.processed_results["apogee"][0], 4711, rtol=0.2
)
assert np.isclose(
monte_carlo_calisto.processed_results["impact_velocity"][0],
-5.234,
rtol=0.2,
)
finally:
_post_test_file_cleanup()


def test_monte_carlo_set_inputs_log(monte_carlo_calisto):
@@ -50,14 +64,17 @@ def test_monte_carlo_set_inputs_log(monte_carlo_calisto):
monte_carlo_calisto : MonteCarlo
The MonteCarlo object, this is a pytest fixture.
"""
monte_carlo_calisto.input_file = "tests/fixtures/monte_carlo/example.inputs.txt"
monte_carlo_calisto.set_inputs_log()
assert len(monte_carlo_calisto.inputs_log) == 100
assert all(isinstance(item, dict) for item in monte_carlo_calisto.inputs_log)
assert all(
"gravity" in item and "elevation" in item
for item in monte_carlo_calisto.inputs_log
)
try:
monte_carlo_calisto.input_file = "tests/fixtures/monte_carlo/example.inputs.txt"
monte_carlo_calisto.set_inputs_log()
assert len(monte_carlo_calisto.inputs_log) == 100
assert all(isinstance(item, dict) for item in monte_carlo_calisto.inputs_log)
assert all(
"gravity" in item and "elevation" in item
for item in monte_carlo_calisto.inputs_log
)
finally:
_post_test_file_cleanup()


def test_monte_carlo_set_outputs_log(monte_carlo_calisto):
@@ -68,14 +85,19 @@ def test_monte_carlo_set_outputs_log(monte_carlo_calisto):
monte_carlo_calisto : MonteCarlo
The MonteCarlo object, this is a pytest fixture.
"""
monte_carlo_calisto.output_file = "tests/fixtures/monte_carlo/example.outputs.txt"
monte_carlo_calisto.set_outputs_log()
assert len(monte_carlo_calisto.outputs_log) == 100
assert all(isinstance(item, dict) for item in monte_carlo_calisto.outputs_log)
assert all(
"apogee" in item and "impact_velocity" in item
for item in monte_carlo_calisto.outputs_log
)
try:
monte_carlo_calisto.output_file = (
"tests/fixtures/monte_carlo/example.outputs.txt"
)
monte_carlo_calisto.set_outputs_log()
assert len(monte_carlo_calisto.outputs_log) == 100
assert all(isinstance(item, dict) for item in monte_carlo_calisto.outputs_log)
assert all(
"apogee" in item and "impact_velocity" in item
for item in monte_carlo_calisto.outputs_log
)
finally:
_post_test_file_cleanup()


# def test_monte_carlo_set_errors_log(monte_carlo_calisto):
@@ -86,22 +108,30 @@ def test_monte_carlo_set_outputs_log(monte_carlo_calisto):

def test_monte_carlo_prints(monte_carlo_calisto):
"""Tests the prints methods of the MonteCarlo class."""
monte_carlo_calisto.info()
monte_carlo_calisto.compare_info(monte_carlo_calisto)
try:
monte_carlo_calisto.info()
monte_carlo_calisto.compare_info(monte_carlo_calisto)
finally:
_post_test_file_cleanup()


@patch("matplotlib.pyplot.show") # pylint: disable=unused-argument
def test_monte_carlo_plots(mock_show, monte_carlo_calisto_pre_loaded):
"""Tests the plots methods of the MonteCarlo class."""
assert monte_carlo_calisto_pre_loaded.all_info() is None
assert (
monte_carlo_calisto_pre_loaded.compare_plots(monte_carlo_calisto_pre_loaded)
is None
)
assert (
monte_carlo_calisto_pre_loaded.compare_ellipses(monte_carlo_calisto_pre_loaded)
is None
)
try:
assert monte_carlo_calisto_pre_loaded.all_info() is None
assert (
monte_carlo_calisto_pre_loaded.compare_plots(monte_carlo_calisto_pre_loaded)
is None
)
assert (
monte_carlo_calisto_pre_loaded.compare_ellipses(
monte_carlo_calisto_pre_loaded
)
is None
)
finally:
_post_test_file_cleanup()


def test_monte_carlo_export_ellipses_to_kml(monte_carlo_calisto_pre_loaded):
@@ -112,17 +142,18 @@ def test_monte_carlo_export_ellipses_to_kml(monte_carlo_calisto_pre_loaded):
monte_carlo_calisto_pre_loaded : MonteCarlo
The MonteCarlo object, this is a pytest fixture.
"""
assert (
monte_carlo_calisto_pre_loaded.export_ellipses_to_kml(
filename="monte_carlo_class_example.kml",
origin_lat=32.990254,
origin_lon=-106.974998,
type="all",
try:
assert (
monte_carlo_calisto_pre_loaded.export_ellipses_to_kml(
filename="monte_carlo_class_example.kml",
origin_lat=32.990254,
origin_lon=-106.974998,
type="all",
)
is None
)
is None
)

os.remove("monte_carlo_class_example.kml")
finally:
_post_test_file_cleanup()


@pytest.mark.slow
@@ -134,47 +165,45 @@ def test_monte_carlo_callback(monte_carlo_calisto):
monte_carlo_calisto : MonteCarlo
The MonteCarlo object, this is a pytest fixture.
"""

# define valid data collector
valid_data_collector = {
"name": lambda flight: flight.name,
"density_t0": lambda flight: flight.env.density(0),
}

monte_carlo_calisto.data_collector = valid_data_collector
# NOTE: this is really slow, it runs 10 flight simulations
monte_carlo_calisto.simulate(number_of_simulations=10, append=False)

# tests if print works when we have None in summary
monte_carlo_calisto.info()

## tests if an error is raised for invalid data_collector definitions
# invalid type
def invalid_data_collector(flight):
return flight.name

with pytest.raises(ValueError):
monte_carlo_calisto._check_data_collector(invalid_data_collector)

# invalid key overwrite
invalid_data_collector = {"apogee": lambda flight: flight.apogee}
with pytest.raises(ValueError):
monte_carlo_calisto._check_data_collector(invalid_data_collector)

# invalid callback definition
invalid_data_collector = {"name": "Calisto"} # callbacks must be callables!
with pytest.raises(ValueError):
monte_carlo_calisto._check_data_collector(invalid_data_collector)

# invalid logic (division by zero)
invalid_data_collector = {
"density_t0": lambda flight: flight.env.density(0) / "0",
}
monte_carlo_calisto.data_collector = invalid_data_collector
# NOTE: this is really slow, it runs 10 flight simulations
with pytest.raises(ValueError):
try:
# define valid data collector
valid_data_collector = {
"name": lambda flight: flight.name,
"density_t0": lambda flight: flight.env.density(0),
}

monte_carlo_calisto.data_collector = valid_data_collector
# NOTE: this is really slow, it runs 10 flight simulations
monte_carlo_calisto.simulate(number_of_simulations=10, append=False)

os.remove("monte_carlo_test.errors.txt")
os.remove("monte_carlo_test.outputs.txt")
os.remove("monte_carlo_test.inputs.txt")
# tests if print works when we have None in summary
monte_carlo_calisto.info()

## tests if an error is raised for invalid data_collector definitions
# invalid type
def invalid_data_collector(flight):
return flight.name

with pytest.raises(ValueError):
monte_carlo_calisto._check_data_collector(invalid_data_collector)

# invalid key overwrite
invalid_data_collector = {"apogee": lambda flight: flight.apogee}
with pytest.raises(ValueError):
monte_carlo_calisto._check_data_collector(invalid_data_collector)

# invalid callback definition
invalid_data_collector = {"name": "Calisto"} # callbacks must be callables!
with pytest.raises(ValueError):
monte_carlo_calisto._check_data_collector(invalid_data_collector)

# invalid logic (division by zero)
invalid_data_collector = {
"density_t0": lambda flight: flight.env.density(0) / "0",
}
monte_carlo_calisto.data_collector = invalid_data_collector
# NOTE: this is really slow, it runs 10 flight simulations
with pytest.raises(ValueError):
monte_carlo_calisto.simulate(number_of_simulations=10, append=False)
finally:
_post_test_file_cleanup()
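
For comparison, the same cleanup could plausibly be expressed as an autouse pytest fixture, so individual tests would not each need a try/finally block. This is only an illustrative sketch and is not part of this PR; the fixture name and the module constant below are hypothetical, and it assumes the same artifact file names used above:

import os

import pytest

# Hypothetical names for illustration only; not part of the PR.
_MONTE_CARLO_ARTIFACTS = (
    "monte_carlo_class_example.kml",
    "monte_carlo_test.errors.txt",
    "monte_carlo_test.inputs.txt",
    "monte_carlo_test.outputs.txt",
)


@pytest.fixture(autouse=True)
def _remove_monte_carlo_artifacts():
    """Run the test body, then delete any Monte Carlo files it left behind."""
    yield
    for path in _MONTE_CARLO_ARTIFACTS:
        if os.path.exists(path):
            os.remove(path)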