From 96b0a06afa613e9cd8a7bd745ef6c785bd9a15d2 Mon Sep 17 00:00:00 2001
From: Katy
Date: Tue, 1 Aug 2023 11:11:57 -0700
Subject: [PATCH 1/3] Possible bugfix for horizon issue

---
 examples/horizon.py                     | 8 +++++---
 src/prog_algs/predictors/monte_carlo.py | 3 ++-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/examples/horizon.py b/examples/horizon.py
index bd5e034b..9d7d1264 100644
--- a/examples/horizon.py
+++ b/examples/horizon.py
@@ -20,7 +20,7 @@ def run_example():
     # Step 1: Setup model & future loading
     def future_loading(t, x = None):
         return {}
-    m = ThrownObject(process_noise = 0.25, measurement_noise = 0.2)
+    m = ThrownObject(process_noise = 0.12, measurement_noise = 0.1)# ThrownObject(process_noise = 0.25, measurement_noise = 0.2)
     initial_state = m.initialize()
 
     # Step 2: Demonstrating state estimator
@@ -50,9 +50,9 @@ def future_loading(t, x = None):
     # THIS IS WHERE WE DIVERGE FROM THE THROWN_OBJECT_EXAMPLE
     # Here we set a prediction horizon
     # We're saying we are not interested in any events that occur after this time
-    PREDICTION_HORIZON = 7.75
+    PREDICTION_HORIZON = 7.672
     samples = filt.x # Since we're using a particle filter, which is also sample-based, we can directly use the samples, without changes
-    STEP_SIZE = 0.01
+    STEP_SIZE = 0.001 #0.01
     mc_results = mc.predict(samples, future_loading, dt=STEP_SIZE, horizon = PREDICTION_HORIZON)
     print("\nPredicted Time of Event:")
     metrics = mc_results.time_of_event.metrics()
@@ -66,6 +66,8 @@ def future_loading(t, x = None):
     import matplotlib.pyplot as plt # For plotting
     plt.show()
 
+    debug = 1
+
 # This allows the module to be executed directly
 if __name__ == '__main__':
     run_example()

diff --git a/src/prog_algs/predictors/monte_carlo.py b/src/prog_algs/predictors/monte_carlo.py
index ec2ac94c..309975f8 100644
--- a/src/prog_algs/predictors/monte_carlo.py
+++ b/src/prog_algs/predictors/monte_carlo.py
@@ -98,7 +98,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
             # Since horizon is relative to t0 (the simulation starting point),
             # we must subtract the difference in current t0 from the initial (i.e., prediction t0)
             # each subsequent simulation
-            params['horizon'] = HORIZON - (params['t0'] - t0)
+            # params['horizon'] = HORIZON - (params['t0'] - t0)
 
             # Simulate
             events_remaining = params['events'].copy()
@@ -111,6 +111,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
 
                 # Non-vectorized prediction
                 while len(events_remaining) > 0: # Still events to predict
+                    params['horizon'] = HORIZON - (params['t0'] - t0)
                     (t, u, xi, z, es) = simulate_to_threshold(future_loading_eqn,
                         first_output,
                         threshold_keys = events_remaining,

From 96639fcf383b493e15a890ccab8cd65cee917377 Mon Sep 17 00:00:00 2001
From: Katy
Date: Tue, 15 Aug 2023 16:04:48 -0700
Subject: [PATCH 2/3] Adding horizon tests, cleaning up example

---
 examples/horizon.py                     | 10 ++--
 src/prog_algs/predictors/monte_carlo.py |  7 +--
 tests/__main__.py                       |  6 ++
 tests/test_horizon.py                   | 77 +++++++++++++++++++++++++
 4 files changed, 90 insertions(+), 10 deletions(-)
 create mode 100644 tests/test_horizon.py

diff --git a/examples/horizon.py b/examples/horizon.py
index 9d7d1264..ee8aa037 100644
--- a/examples/horizon.py
+++ b/examples/horizon.py
@@ -20,7 +20,7 @@ def run_example():
     # Step 1: Setup model & future loading
     def future_loading(t, x = None):
         return {}
-    m = ThrownObject(process_noise = 0.12, measurement_noise = 0.1)# ThrownObject(process_noise = 0.25, measurement_noise = 0.2)
+    m = ThrownObject(process_noise = 0.2, measurement_noise = 0.1)
     initial_state = m.initialize()
 
     # Step 2: Demonstrating state estimator
@@ -50,9 +50,9 @@ def future_loading(t, x = None):
     # THIS IS WHERE WE DIVERGE FROM THE THROWN_OBJECT_EXAMPLE
     # Here we set a prediction horizon
     # We're saying we are not interested in any events that occur after this time
-    PREDICTION_HORIZON = 7.672
+    PREDICTION_HORIZON = 7.67
     samples = filt.x # Since we're using a particle filter, which is also sample-based, we can directly use the samples, without changes
-    STEP_SIZE = 0.001 #0.01
+    STEP_SIZE = 0.001
     mc_results = mc.predict(samples, future_loading, dt=STEP_SIZE, horizon = PREDICTION_HORIZON)
     print("\nPredicted Time of Event:")
     metrics = mc_results.time_of_event.metrics()
@@ -60,14 +60,12 @@ def future_loading(t, x = None):
     mc_results.time_of_event.plot_hist(keys = 'impact')
     mc_results.time_of_event.plot_hist(keys = 'falling')
 
-    print("\nSamples where impact occurs before horizon: {:.2f}%".format(metrics['impact']['number of samples']/NUM_SAMPLES*100))
+    print("\nSamples where impact occurs before horizon: {:.2f}%".format(metrics['impact']['number of samples']/mc.parameters['n_samples']*100))
 
     # Step 4: Show all plots
     import matplotlib.pyplot as plt # For plotting
     plt.show()
 
-    debug = 1
-
 # This allows the module to be executed directly
 if __name__ == '__main__':
     run_example()

diff --git a/src/prog_algs/predictors/monte_carlo.py b/src/prog_algs/predictors/monte_carlo.py
index 309975f8..3b2cab5b 100644
--- a/src/prog_algs/predictors/monte_carlo.py
+++ b/src/prog_algs/predictors/monte_carlo.py
@@ -95,10 +95,6 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
                 **params
             )
         else:
-            # Since horizon is relative to t0 (the simulation starting point),
-            # we must subtract the difference in current t0 from the initial (i.e., prediction t0)
-            # each subsequent simulation
-            # params['horizon'] = HORIZON - (params['t0'] - t0)
 
             # Simulate
             events_remaining = params['events'].copy()
@@ -111,6 +107,9 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
 
                 # Non-vectorized prediction
                 while len(events_remaining) > 0: # Still events to predict
+                    # Since horizon is relative to t0 (the simulation starting point),
+                    # we must subtract the difference in current t0 from the initial (i.e., prediction t0)
+                    # each subsequent simulation
                     params['horizon'] = HORIZON - (params['t0'] - t0)
                     (t, u, xi, z, es) = simulate_to_threshold(future_loading_eqn,
                         first_output,
                         threshold_keys = events_remaining,

diff --git a/tests/__main__.py b/tests/__main__.py
index d38fc221..9aec7d5b 100644
--- a/tests/__main__.py
+++ b/tests/__main__.py
@@ -7,6 +7,7 @@
 from .test_metrics import run_tests as metrics_main
 from .test_visualize import run_tests as visualize_main
 from .test_tutorials import run_tests as tutorials_main
+from .test_horizon import run_tests as horizon_main
 
 import unittest
 import sys
@@ -69,5 +70,10 @@ def run_basic_ex():
 except Exception:
     was_successful = False
 
+try:
+    horizon_main()
+except Exception:
+    was_successful = False
+
 if not was_successful:
     raise Exception('Tests Failed')

diff --git a/tests/test_horizon.py b/tests/test_horizon.py
new file mode 100644
index 00000000..4aaaf7f5
--- /dev/null
+++ b/tests/test_horizon.py
@@ -0,0 +1,77 @@
+from io import StringIO
+import sys
+import unittest
+import numpy as np
+
+from prog_models.models.thrown_object import ThrownObject
+from prog_algs import *
+from pprint import pprint
+
+class TestHorizon(unittest.TestCase):
+    def setUp(self):
+        # set stdout (so it won't print)
+        sys.stdout = StringIO()
+
+    def tearDown(self):
+        sys.stdout = sys.__stdout__
+
+    def test_horizon_ex(self):
+        # Setup model
+        m = ThrownObject(process_noise = 0.25, measurement_noise = 0.2)
+        # Change parameters (to make simulation faster)
+        m.parameters['thrower_height'] = 1.0
+        m.parameters['throwing_speed'] = 10.0
+        initial_state = m.initialize()
+
+        # Define future loading (necessary for prediction call)
+        def future_loading(t, x = None):
+            return {}
+
+        # Setup Predictor (smaller sample size for efficiency)
+        mc = predictors.MonteCarlo(m)
+        mc.parameters['n_samples'] = 50
+
+        # Perform a prediction
+        # With this horizon, all samples will reach 'falling', but only some will reach 'impact'
+        PREDICTION_HORIZON = 2.127
+        STEP_SIZE = 0.001
+        mc_results = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon = PREDICTION_HORIZON)
+
+        # 'falling' happens before the horizon is met
+        falling_res = [mc_results.time_of_event[iter]['falling'] for iter in range(mc.parameters['n_samples']) if mc_results.time_of_event[iter]['falling'] is not None]
+        self.assertEqual(len(falling_res), mc.parameters['n_samples'])
+
+        # 'impact' happens around the horizon, so some samples have reached this event and others haven't
+        impact_res = [mc_results.time_of_event[iter]['impact'] for iter in range(mc.parameters['n_samples']) if mc_results.time_of_event[iter]['impact'] is not None]
+        self.assertLess(len(impact_res), mc.parameters['n_samples'])
+
+        # Try again with very low prediction_horizon, where no events are reached
+        # Note: here we count how many None values there are for each event (in the above and below examples, we count values that are NOT None)
+        mc_results_no_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon = 0.3)
+        falling_res_no_event = [mc_results_no_event.time_of_event[iter]['falling'] for iter in range(mc.parameters['n_samples']) if mc_results_no_event.time_of_event[iter]['falling'] is None]
+        impact_res_no_event = [mc_results_no_event.time_of_event[iter]['impact'] for iter in range(mc.parameters['n_samples']) if mc_results_no_event.time_of_event[iter]['impact'] is None]
+        self.assertEqual(len(falling_res_no_event), mc.parameters['n_samples'])
+        self.assertEqual(len(impact_res_no_event), mc.parameters['n_samples'])
+
+        # Finally, try without horizon, all events should be reached for all samples
+        mc_results_all_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE)
+        falling_res_all_event = [mc_results_all_event.time_of_event[iter]['falling'] for iter in range(mc.parameters['n_samples']) if mc_results_all_event.time_of_event[iter]['falling'] is not None]
+        impact_res_all_event = [mc_results_all_event.time_of_event[iter]['impact'] for iter in range(mc.parameters['n_samples']) if mc_results_all_event.time_of_event[iter]['impact'] is not None]
+        self.assertEqual(len(falling_res_all_event), mc.parameters['n_samples'])
+        self.assertEqual(len(impact_res_all_event), mc.parameters['n_samples'])
+
+# This allows the module to be executed directly
+def run_tests():
+    unittest.main()
+    
+def main():
+    load_test = unittest.TestLoader()
+    runner = unittest.TextTestRunner()
+    print("\n\nTesting Horizon functionality")
+    result = runner.run(load_test.loadTestsFromTestCase(TestHorizon)).wasSuccessful()
+
+    if not result:
+        raise Exception("Failed test")
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file

From 200135dc6d6da417e9b644cae1d801cbd8ddbd85 Mon Sep 17 00:00:00 2001
From: Katy
Date: Thu, 17 Aug 2023 13:54:32 -0700
Subject: [PATCH 3/3] removing unused imports

---
 tests/test_horizon.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/tests/test_horizon.py b/tests/test_horizon.py
index 4aaaf7f5..0eb7fa21 100644
--- a/tests/test_horizon.py
+++ b/tests/test_horizon.py
@@ -1,11 +1,9 @@
 from io import StringIO
 import sys
 import unittest
-import numpy as np
 
 from prog_models.models.thrown_object import ThrownObject
 from prog_algs import *
-from pprint import pprint
 
 class TestHorizon(unittest.TestCase):
     def setUp(self):
@@ -63,7 +61,7 @@ def future_loading(t, x = None):
 # This allows the module to be executed directly
 def run_tests():
     unittest.main()
-    
+
 def main():
     load_test = unittest.TestLoader()
     runner = unittest.TextTestRunner()