Commit 1b56107: [Fix] unit test
Parent: 4c1f7a6

11 files changed: +168 / -196 lines

.coveragerc

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+# .coveragerc to control coverage.py
+[run]
+branch = True
+
+[report]
+# Regexes for lines to exclude from consideration
+exclude_lines =
+    # Have to re-enable the standard pragma
+    pragma: no cover
+
+    # Don't complain about missing debug-only code:
+    def __repr__
+    if self\.debug
+
+    # Don't complain if tests don't hit defensive assertion code:
+    raise AssertionError
+    raise NotImplementedError
+
+    # Don't complain if non-runnable code isn't run:
+    if 0:
+    if __name__ == .__main__.:
+
+ignore_errors = True
+
+[html]
+directory = coverage_html_report
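
For orientation, branch = True makes coverage.py track branch coverage, and the exclude_lines regexes drop defensive or non-runnable lines from the report. A minimal sketch of code these rules would exclude (a hypothetical module, not part of this commit):

    # example.py
    class Point:
        def __init__(self, x: int) -> None:
            self.x = x

        def __repr__(self) -> str:           # excluded: matches "def __repr__"
            return f"Point({self.x})"

        def halve(self) -> int:
            if self.x % 2:
                raise AssertionError("odd")  # excluded: "raise AssertionError"
            return self.x // 2

    if __name__ == "__main__":               # excluded: the __main__ regex
        print(Point(4).halve())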

.github/workflows/pytest.yml

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ jobs:
         echo "::set-output name=BEFORE::$(git status --porcelain -b)"
     - name: Run tests
       run: |
-        if [ ${{ matrix.code-cov }} ]; then codecov='--cov=autoPyTorch --cov-report=xml'; fi
+        if [ ${{ matrix.code-cov }} ]; then codecov='--cov=autoPyTorch --cov-report=xml --cov-config=.coveragerc'; fi
         python -m pytest --forked --durations=20 --timeout=600 --timeout-method=signal -v $codecov test
     - name: Check for files left behind by test
       if: ${{ always() }}
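
The added --cov-config flag simply points pytest-cov at the new .coveragerc. A local equivalent of the CI call (a sketch that assumes pytest-cov is installed and omits the forked/timeout plugins):

    import pytest

    # Mirrors the coverage part of the workflow's pytest invocation
    pytest.main(['--cov=autoPyTorch', '--cov-report=xml',
                 '--cov-config=.coveragerc', 'test'])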

autoPyTorch/datasets/tabular_dataset.py

Lines changed: 0 additions & 12 deletions
@@ -24,18 +24,6 @@
 )
 
 
-class Value2Index(object):
-    def __init__(self, values: list):
-        assert all(not (pd.isna(v)) for v in values)
-        self.values = {v: i for i, v in enumerate(values)}
-
-    def __getitem__(self, item: Any) -> int:
-        if pd.isna(item):
-            return 0
-        else:
-            return self.values[item] + 1
-
-
 class TabularDataset(BaseDataset):
     """
     Base class for datasets used in AutoPyTorch
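
For reference, the deleted Value2Index helper mapped each known value to a 1-based index and reserved 0 for missing entries. A small sketch of its behaviour, reconstructed from the deleted lines above (paste the class definition first to run it):

    # NaN lookups return the reserved index 0; known values are shifted by +1
    v2i = Value2Index(['a', 'b'])
    assert v2i['a'] == 1
    assert v2i['b'] == 2
    assert v2i[float('nan')] == 0   # pd.isna(item) is True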

autoPyTorch/search_space/__init__.py

Whitespace-only changes.

autoPyTorch/search_space/search_space.py

Lines changed: 0 additions & 153 deletions
This file was deleted.

test/conftest.py

Lines changed: 2 additions & 0 deletions
@@ -1,3 +1,4 @@
+import logging.handlers
 import os
 import re
 import shutil
@@ -299,6 +300,7 @@ def get_fit_dictionary(X, y, validator, backend):
         'metrics_during_training': True,
         'split_id': 0,
         'backend': backend,
+        'logger_port': logging.handlers.DEFAULT_TCP_LOGGING_PORT,
     }
     backend.save_datamanager(datamanager)
     return fit_dictionary
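
The new logger_port entry pins the fixture to the standard library's default TCP logging port instead of a magic number; the constant is defined in logging.handlers:

    import logging.handlers

    # Stdlib constant used by the fixture above (resolves to 9020)
    print(logging.handlers.DEFAULT_TCP_LOGGING_PORT)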

test/test_api/test_api.py

Lines changed: 19 additions & 14 deletions
@@ -5,6 +5,8 @@
 import unittest
 from test.test_api.utils import dummy_do_dummy_prediction, dummy_eval_function
 
+import ConfigSpace as CS
+
 import numpy as np
 
 import pandas as pd
@@ -97,15 +99,15 @@ def test_tabular_classification(openml_id, resampling_strategy, backend, resampl
     assert len(loaded_datamanager.train_tensors) == len(estimator.dataset.train_tensors)
 
     expected_files = [
-        'smac3-output/run_1/configspace.json',
-        'smac3-output/run_1/runhistory.json',
-        'smac3-output/run_1/scenario.txt',
-        'smac3-output/run_1/stats.json',
-        'smac3-output/run_1/train_insts.txt',
-        'smac3-output/run_1/trajectory.json',
+        'smac3-output/run_42/configspace.json',
+        'smac3-output/run_42/runhistory.json',
+        'smac3-output/run_42/scenario.txt',
+        'smac3-output/run_42/stats.json',
+        'smac3-output/run_42/train_insts.txt',
+        'smac3-output/run_42/trajectory.json',
         '.autoPyTorch/datamanager.pkl',
         '.autoPyTorch/ensemble_read_preds.pkl',
-        '.autoPyTorch/start_time_1',
+        '.autoPyTorch/start_time_42',
         '.autoPyTorch/ensemble_history.json',
         '.autoPyTorch/ensemble_read_losses.pkl',
         '.autoPyTorch/true_targets_ensemble.npy',
@@ -212,6 +214,9 @@ def test_tabular_classification(openml_id, resampling_strategy, backend, resampl
     # Test refit on dummy data
     estimator.refit(dataset=backend.load_datamanager())
 
+    # Make sure that a configuration space is stored in the estimator
+    assert isinstance(estimator.get_search_space(), CS.ConfigurationSpace)
+
 
 @pytest.mark.parametrize('openml_name', ("boston", ))
 @unittest.mock.patch('autoPyTorch.evaluation.train_evaluator.eval_function',
@@ -284,15 +289,15 @@ def test_tabular_regression(openml_name, resampling_strategy, backend, resamplin
     assert len(loaded_datamanager.train_tensors) == len(estimator.dataset.train_tensors)
 
     expected_files = [
-        'smac3-output/run_1/configspace.json',
-        'smac3-output/run_1/runhistory.json',
-        'smac3-output/run_1/scenario.txt',
-        'smac3-output/run_1/stats.json',
-        'smac3-output/run_1/train_insts.txt',
-        'smac3-output/run_1/trajectory.json',
+        'smac3-output/run_42/configspace.json',
+        'smac3-output/run_42/runhistory.json',
+        'smac3-output/run_42/scenario.txt',
+        'smac3-output/run_42/stats.json',
+        'smac3-output/run_42/train_insts.txt',
+        'smac3-output/run_42/trajectory.json',
         '.autoPyTorch/datamanager.pkl',
         '.autoPyTorch/ensemble_read_preds.pkl',
-        '.autoPyTorch/start_time_1',
+        '.autoPyTorch/start_time_42',
         '.autoPyTorch/ensemble_history.json',
         '.autoPyTorch/ensemble_read_losses.pkl',
         '.autoPyTorch/true_targets_ensemble.npy',
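
SMAC names its output directory after the seed it was given (run_<seed>), so the move from run_1 to run_42 reflects the tests now running with seed 42. A hedged sketch of how the expected paths are derived (file names taken from the diff; the helper is illustrative only):

    seed = 42
    smac_files = [f'smac3-output/run_{seed}/{name}' for name in
                  ('configspace.json', 'runhistory.json', 'scenario.txt',
                   'stats.json', 'train_insts.txt', 'trajectory.json')]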

test/test_api/test_base_api.py

Lines changed: 44 additions & 1 deletion
@@ -1,10 +1,15 @@
+import logging
+import re
 import unittest
+from unittest.mock import MagicMock
 
 import numpy as np
 
 import pytest
 
-from autoPyTorch.api.base_task import BaseTask
+from autoPyTorch.api.base_task import BaseTask, _pipeline_predict
+from autoPyTorch.constants import TABULAR_CLASSIFICATION, TABULAR_REGRESSION
+from autoPyTorch.pipeline.tabular_classification import TabularClassificationPipeline
 
 
 # ====
@@ -29,6 +34,10 @@ def test_nonsupported_arguments(fit_dictionary_tabular):
     api.ensemble_ = unittest.mock.MagicMock()
     with pytest.raises(ValueError, match=r".*No metric found. Either fit/search has not been.*"):
         api.score(np.ones(10), np.ones(10))
+    api.task_type = None
+    api._metric = MagicMock()
+    with pytest.raises(ValueError, match=r".*AutoPytorch failed to infer a task type*"):
+        api.score(np.ones(10), np.ones(10))
     api._metric = unittest.mock.MagicMock()
     with pytest.raises(ValueError, match=r".*No valid model found in run history.*"):
         api._load_models()
@@ -44,3 +53,37 @@ def returnfalse():
         api.predict(np.ones((10, 10)))
     with pytest.raises(ValueError, match=r".*No ensemble found. Either fit has not yet.*"):
         api.predict(np.ones((10, 10)))
+
+
+def test_pipeline_predict_function():
+    X = np.ones((10, 10))
+    pipeline = MagicMock()
+    pipeline.predict.return_value = np.full((10,), 3)
+    pipeline.predict_proba.return_value = np.full((10, 2), 3)
+
+    # First handle the classification case
+    task = TABULAR_CLASSIFICATION
+    with pytest.raises(ValueError, match='prediction probability not within'):
+        _pipeline_predict(pipeline, X, 5, logging.getLogger, task)
+    pipeline.predict_proba.return_value = np.zeros((10, 2))
+    predictions = _pipeline_predict(pipeline, X, 5, logging.getLogger(), task)
+    assert np.shape(predictions) == (10, 2)
+
+    task = TABULAR_REGRESSION
+    predictions = _pipeline_predict(pipeline, X, 5, logging.getLogger(), task)
+    assert np.shape(predictions) == (10,)
+    # Trigger warning msg with different shape for prediction
+    pipeline.predict.return_value = np.full((12,), 3)
+    predictions = _pipeline_predict(pipeline, X, 5, logging.getLogger(), task)
+
+
+@pytest.mark.parametrize("fit_dictionary_tabular", ['classification_categorical_only'], indirect=True)
+def test_show_models(fit_dictionary_tabular):
+    api = BaseTask()
+    api.ensemble_ = MagicMock()
+    api.models_ = [TabularClassificationPipeline(dataset_properties=fit_dictionary_tabular['dataset_properties'])]
+    api.ensemble_.get_models_with_weights.return_value = [(1.0, api.models_[0])]
+    # Expect the default configuration
+    expected = (r"0\s+|\s+SimpleImputer,OneHotEncoder,NoScaler,NoFeaturePreprocessing\s+"
+                r"|\s+no embedding,ShapedMLPBackbone,FullyConnectedHead,nn.Sequential\s+|\s+1")
+    assert re.search(expected, api.show_models()) is not None

test/test_evaluation/test_evaluation.py

Lines changed: 9 additions & 1 deletion
@@ -10,12 +10,15 @@
 
 import pynisher
 
+import pytest
+
 from smac.runhistory.runhistory import RunInfo
 from smac.stats.stats import Stats
 from smac.tae import StatusType
+from smac.utils.constants import MAXINT
 
 from autoPyTorch.evaluation.tae import ExecuteTaFuncWithQueue, get_cost_of_crash
-from autoPyTorch.pipeline.components.training.metrics.metrics import accuracy
+from autoPyTorch.pipeline.components.training.metrics.metrics import accuracy, log_loss
 
 this_directory = os.path.dirname(__file__)
 sys.path.append(this_directory)
@@ -389,3 +392,8 @@ def test_silent_exception_in_target_function(self):
         self.assertNotIn('exitcode', info[1].additional_info)
         self.assertNotIn('exit_status', info[1].additional_info)
         self.assertNotIn('traceback', info[1])
+
+
+@pytest.mark.parametrize("metric,expected", [(accuracy, 1.0), (log_loss, MAXINT)])
+def test_get_cost_of_crash(metric, expected):
+    assert get_cost_of_crash(metric) == expected
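
The new parametrised test fixes the cost SMAC is charged when a target run crashes: 1.0 for accuracy and SMAC's MAXINT sentinel for log_loss. A plausible reading is that bounded metrics use their full loss range while unbounded ones fall back to the sentinel, though only the two expected values are pinned by the test itself:

    from autoPyTorch.evaluation.tae import get_cost_of_crash
    from autoPyTorch.pipeline.components.training.metrics.metrics import accuracy

    # A crashed run is reported to SMAC with this cost (1.0 per the test above)
    crash_cost = get_cost_of_crash(accuracy)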
