AUTO docusaurus 20210302

GitHub CI committed Mar 2, 2021
1 parent 581cbcc commit 1b03dd7
Showing 39 changed files with 322 additions and 6 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/continous-integration.yml
@@ -194,6 +194,7 @@ jobs:
matrix:
os: [ubuntu-latest, windows-latest]
python-version: [3.6, 3.7, 3.8]
test: [test-non-training, test-training]

steps:
- name: Checkout git repository 🕝
@@ -277,7 +278,7 @@ jobs:
JOBS: 2
PYTHONIOENCODING: "utf-8"
run: |
make test
make ${{ matrix.test }}
- name: Send Coverage Report 📊
if: needs.changes.outputs.backend == 'true' && matrix.python-version == 3.6 && matrix.os != 'windows-latest'
8 changes: 8 additions & 0 deletions Makefile
@@ -182,6 +182,14 @@ else
set -o allexport; source tests_deployment/.env && OMP_NUM_THREADS=1 poetry run pytest $(INTEGRATION_TEST_FOLDER) -n $(JOBS) -m $(INTEGRATION_TEST_PYTEST_MARKERS) && set +o allexport
endif

test-non-training: clean
# OMP_NUM_THREADS can improve overall performance by using one thread per process (for TensorFlow), avoiding overload
RAISE_ON_TRAIN=True OMP_NUM_THREADS=1 poetry run pytest tests -n $(JOBS) --cov rasa -m "not trains_model" --ignore $(INTEGRATION_TEST_FOLDER)

test-training: clean
# OMP_NUM_THREADS can improve overall performance by using one thread per process (for TensorFlow), avoiding overload
OMP_NUM_THREADS=1 poetry run pytest tests -n $(JOBS) --cov rasa -m "trains_model" --ignore $(INTEGRATION_TEST_FOLDER)

generate-pending-changelog:
poetry run python -c "from scripts import release; release.generate_changelog('major.minor.patch')"

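The two new targets differ only in the marker expression handed to pytest's `-m` option and in whether `RAISE_ON_TRAIN` is exported. A minimal sketch of the equivalent selection through pytest's Python API follows (not part of this commit; the `tests_deployment` folder name stands in for `$(INTEGRATION_TEST_FOLDER)` as an assumption, and the `-n $(JOBS)` / `--cov rasa` flags are omitted):

# Sketch only: mirrors what `make test-non-training` / `make test-training` select.
import os
import sys

import pytest

if __name__ == "__main__":
    mode = sys.argv[1] if len(sys.argv) > 1 else "non-training"
    os.environ.setdefault("OMP_NUM_THREADS", "1")  # one TensorFlow thread per process
    if mode == "non-training":
        os.environ["RAISE_ON_TRAIN"] = "True"  # make any unexpected training call raise
        marker_expr = "not trains_model"
    else:
        marker_expr = "trains_model"
    # "tests_deployment" is an assumed folder name; the real targets also pass
    # -n $(JOBS) (pytest-xdist) and --cov rasa (pytest-cov).
    sys.exit(pytest.main(["tests", "-m", marker_expr, "--ignore", "tests_deployment"]))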
15 changes: 15 additions & 0 deletions README.md
@@ -185,6 +185,21 @@ JOBS=[n] make test

Where `[n]` is the number of jobs desired. If omitted, `[n]` will be automatically chosen by pytest.

#### Tests that train
A test that trains a model is defined as any test that explicitly or inadvertently calls any method annotated with `@rasa.shared.utils.common.raise_on_unexpected_train`.
Currently, these are: `nlu.train`, `core.train`, `Agent.train`, and `Trainer.train`.

We mark tests that train a model with the pytest marker `trains_model`, e.g.:

@pytest.mark.trains_model
def test_some_training():
    ...

These tests are then run separately in CI using the make targets `make test-non-training` and `make test-training`, respectively.

The command `make test-non-training` will fail if training occurs.
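
For illustration, a minimal sketch with hypothetical test names, showing how the split plays out:

import pytest


@pytest.mark.trains_model
def test_full_pipeline_training():
    # selected by -m "trains_model", i.e. run by `make test-training`
    ...


def test_message_parsing_only():
    # selected by -m "not trains_model", i.e. run by `make test-non-training`;
    # if this test accidentally reached a wrapped training method, the
    # RAISE_ON_TRAIN guard would make it fail immediately
    ...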


### Running the Integration Tests

2 changes: 1 addition & 1 deletion docs/docs/sources/rasa_interactive___help.txt
@@ -37,7 +37,7 @@ optional arguments:
--conversation-id CONVERSATION_ID
Specify the id of the conversation the messages are
in. Defaults to a UUID that will be randomly
generated. (default: c515fc57a91447659a3be492af8ed275)
generated. (default: 2e845bcf5b864136b633b9a3edc2f91f)
--endpoints ENDPOINTS
Configuration file for the model server and the
connectors as a yml file. (default: None)
2 changes: 1 addition & 1 deletion docs/docs/sources/rasa_shell___help.txt
@@ -24,7 +24,7 @@ optional arguments:
-h, --help show this help message and exit
--conversation-id CONVERSATION_ID
Set the conversation ID. (default:
189e94bf710a44488bc8ee798248ddaa)
64abaf20d0f44917a6fbeeeb2f9046aa)
-m MODEL, --model MODEL
Path to a trained Rasa model. If a directory is
specified, it will use the latest model in this
1 change: 1 addition & 0 deletions pytest.ini
@@ -2,5 +2,6 @@
markers =
skip_on_windows: mark a test as a test that shouldn't be executed on Windows.
sequential: mark tests that cannot be run in parallel (e.g. because they need access to the same resource).
trains_model: mark a test that trains a model through either `nlu.train` or `core.train`.
# see https://pypi.org/project/pytest-timeout/
timeout = 120
1 change: 0 additions & 1 deletion rasa/nlu/train.py
@@ -11,7 +11,6 @@
from rasa.utils import io as io_utils
from rasa.utils.endpoints import EndpointConfig


if typing.TYPE_CHECKING:
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.nlu.training_data.training_data import TrainingData
8 changes: 8 additions & 0 deletions tests/cli/test_rasa_test.py
@@ -1,6 +1,8 @@
import os
from shutil import copyfile

import pytest

from rasa.core.test import CONFUSION_MATRIX_STORIES_FILE
from rasa.constants import RESULTS_FILE
from rasa.shared.constants import DEFAULT_RESULTS_PATH
@@ -32,6 +34,7 @@ def test_test_core_with_no_model(run_in_simple_project: Callable[..., RunResult]
)


@pytest.mark.trains_model
def test_test(run_in_simple_project_with_model: Callable[..., RunResult]):
write_yaml(
{
Expand All @@ -48,6 +51,7 @@ def test_test(run_in_simple_project_with_model: Callable[..., RunResult]):
assert os.path.exists("results/intent_confusion_matrix.png")


@pytest.mark.trains_model
def test_test_with_no_user_utterance(
run_in_simple_project_with_model: Callable[..., RunResult]
):
@@ -81,6 +85,7 @@ def test_test_no_plot(run_in_simple_project: Callable[..., RunResult]):
assert not os.path.exists("results/story_confmat.pdf")


@pytest.mark.trains_model
def test_test_nlu(run_in_simple_project_with_model: Callable[..., RunResult]):
run_in_simple_project_with_model("test", "nlu", "--nlu", "data", "--successes")

@@ -96,6 +101,7 @@ def test_test_nlu_no_plot(run_in_simple_project: Callable[..., RunResult]):
assert not os.path.exists("results/intent_confusion_matrix.png")


@pytest.mark.trains_model
def test_test_nlu_cross_validation(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"test", "nlu", "--cross-validation", "-c", "config.yml", "-f", "2", "-r", "1"
@@ -126,6 +132,7 @@ def test_test_nlu_comparison(run_in_simple_project: Callable[..., RunResult]):
assert os.path.exists("results/run_2")


@pytest.mark.trains_model
def test_test_core_comparison(
run_in_simple_project_with_model: Callable[..., RunResult]
):
@@ -145,6 +152,7 @@ def test_test_core_comparison(
assert os.path.exists(os.path.join(DEFAULT_RESULTS_PATH, RESULTS_FILE))


@pytest.mark.trains_model
def test_test_core_comparison_after_train(
run_in_simple_project: Callable[..., RunResult]
):
14 changes: 14 additions & 0 deletions tests/cli/test_rasa_train.py
@@ -28,6 +28,7 @@
)


@pytest.mark.trains_model
def test_train(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()

@@ -69,6 +70,7 @@ def test_train_finetune(
assert "No NLU model for finetuning found" in output


@pytest.mark.trains_model
def test_train_persist_nlu_data(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()

@@ -100,6 +102,7 @@ def test_train_persist_nlu_data(run_in_simple_project: Callable[..., RunResult])
)


@pytest.mark.trains_model
def test_train_core_compare(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()

@@ -144,6 +147,7 @@ def test_train_core_compare(run_in_simple_project: Callable[..., RunResult]):
assert model_files[0].endswith("tar.gz")


@pytest.mark.trains_model
def test_train_no_domain_exists(
run_in_simple_project: Callable[..., RunResult]
) -> None:
@@ -172,6 +176,7 @@ def test_train_no_domain_exists(
assert os.path.exists(metadata_path)


@pytest.mark.trains_model
def test_train_skip_on_model_not_changed(
run_in_simple_project_with_model: Callable[..., RunResult]
):
@@ -190,6 +195,7 @@ def test_train_skip_on_model_not_changed(
assert file_name == files[0]


@pytest.mark.trains_model
def test_train_force(run_in_simple_project_with_model: Callable[..., RunResult]):
temp_dir = os.getcwd()

@@ -204,6 +210,7 @@ def test_train_force(run_in_simple_project_with_model: Callable[..., RunResult])
assert len(files) == 2


@pytest.mark.trains_model
def test_train_dry_run(run_in_simple_project_with_model: Callable[..., RunResult]):
temp_dir = os.getcwd()

@@ -217,6 +224,7 @@ def test_train_dry_run(run_in_simple_project_with_model: Callable[..., RunResult
assert output.ret == 0


@pytest.mark.trains_model
def test_train_dry_run_failure(
run_in_simple_project_with_model: Callable[..., RunResult]
):
@@ -243,6 +251,7 @@ def test_train_dry_run_failure(
) and (output.ret & CODE_FORCED_TRAINING != CODE_FORCED_TRAINING)


@pytest.mark.trains_model
def test_train_dry_run_force(
run_in_simple_project_with_model: Callable[..., RunResult]
):
@@ -258,6 +267,7 @@ def test_train_dry_run_force(
assert output.ret == CODE_FORCED_TRAINING


@pytest.mark.trains_model
def test_train_with_only_nlu_data(run_in_simple_project: Callable[..., RunResult]):
temp_dir = Path.cwd()

@@ -273,6 +283,7 @@ def test_train_with_only_nlu_data(run_in_simple_project: Callable[..., RunResult
assert os.path.basename(files[0]) == "test-model.tar.gz"


@pytest.mark.trains_model
def test_train_with_only_core_data(run_in_simple_project: Callable[..., RunResult]):
temp_dir = os.getcwd()

@@ -287,6 +298,7 @@ def test_train_with_only_core_data(run_in_simple_project: Callable[..., RunResul
assert os.path.basename(files[0]) == "test-model.tar.gz"


@pytest.mark.trains_model
def test_train_core(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"train",
@@ -329,6 +341,7 @@ def test_train_core_no_domain_exists(run_in_simple_project: Callable[..., RunRes
assert not os.path.isfile("train_rasa_models_no_domain/rasa-model.tar.gz")


@pytest.mark.trains_model
def test_train_nlu(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"train",
@@ -354,6 +367,7 @@ def test_train_nlu(run_in_simple_project: Callable[..., RunResult]):
)


@pytest.mark.trains_model
def test_train_nlu_persist_nlu_data(
run_in_simple_project: Callable[..., RunResult]
) -> None:
32 changes: 32 additions & 0 deletions tests/conftest.py
@@ -1,5 +1,6 @@
import asyncio
import copy
import functools
import os
import random
import pytest
@@ -448,3 +449,34 @@ class AsyncMock(Mock):

async def __call__(self, *args: Any, **kwargs: Any) -> Any:
return super().__call__(*args, **kwargs)


def raise_on_unexpected_train(f: Callable) -> Callable:
    @functools.wraps(f)
    def decorated(*args, **kwargs):
        if os.environ.get("RAISE_ON_TRAIN") == "True":
            raise ValueError(
                "Training called and RAISE_ON_TRAIN is set. "
                "See https://github.com/RasaHQ/rasa#tests-that-train"
            )
        return f(*args, **kwargs)

    return decorated


def wrap_training_methods() -> None:
    """Wrap methods that train so they fail if RAISE_ON_TRAIN is set.
    See "Tests that train" section in rasa/README.md.
    """
    import rasa.nlu as nlu
    import rasa.core as core
    from rasa.nlu.model import Trainer
    from rasa.core.agent import Agent

    for training_module in [nlu, core, Trainer, Agent]:
        training_module.train = raise_on_unexpected_train(training_module.train)


def pytest_configure():
    wrap_training_methods()
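
As a rough usage sketch (not part of this commit; the import path and the fake training function are assumptions), this is how the guard surfaces an unexpected training call once `pytest_configure` has wrapped the training methods:

import pytest

from tests.conftest import raise_on_unexpected_train  # assumed import path


def test_guard_raises_on_unexpected_training(monkeypatch):
    # Simulate the non-training CI job, which exports RAISE_ON_TRAIN=True.
    monkeypatch.setenv("RAISE_ON_TRAIN", "True")

    @raise_on_unexpected_train
    def fake_train() -> str:  # hypothetical stand-in for nlu.train / core.train
        return "trained"

    with pytest.raises(ValueError, match="RAISE_ON_TRAIN is set"):
        fake_train()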
1 change: 1 addition & 0 deletions tests/core/actions/test_forms.py
@@ -96,6 +96,7 @@ async def test_activate_with_prefilled_slot():
]


@pytest.mark.trains_model
async def test_switch_forms_with_same_slot(default_agent: Agent):
"""Tests switching of forms, where the first slot is the same in both forms.
3 changes: 3 additions & 0 deletions tests/core/actions/test_two_stage_fallback.py
@@ -74,6 +74,7 @@ async def test_ask_affirmation(events: List[Event]):
assert isinstance(events[1], BotUttered)


@pytest.mark.trains_model
async def test_1st_affirmation_is_successful(default_processor: MessageProcessor):
tracker = DialogueStateTracker.from_events(
"some-sender",
@@ -178,6 +179,7 @@ async def test_ask_rephrase_after_failed_affirmation():
assert bot_utterance.text == rephrase_text


@pytest.mark.trains_model
async def test_ask_rephrasing_successful(default_processor: MessageProcessor):
tracker = DialogueStateTracker.from_events(
"some-sender",
@@ -254,6 +256,7 @@ async def test_ask_affirm_after_rephrasing():
assert isinstance(events[0], BotUttered)


@pytest.mark.trains_model
async def test_2nd_affirm_successful(default_processor: MessageProcessor):
tracker = DialogueStateTracker.from_events(
"some-sender",
6 changes: 6 additions & 0 deletions tests/core/featurizers/test_single_state_featurizers.py
@@ -182,6 +182,7 @@ def test_single_state_featurizer_creates_encoded_all_actions():


@pytest.mark.timeout(300) # these can take a longer time than the default timeout
@pytest.mark.trains_model
def test_single_state_featurizer_with_entity_roles_and_groups(
unpacked_trained_moodbot_path: Text,
):
@@ -226,6 +227,7 @@ def test_single_state_featurizer_with_entity_roles_and_groups(


@pytest.mark.timeout(300) # these can take a longer time than the default timeout
@pytest.mark.trains_model
def test_single_state_featurizer_with_bilou_entity_roles_and_groups(
unpacked_trained_moodbot_path: Text,
):
@@ -307,6 +309,7 @@ def test_single_state_featurizer_uses_dtype_float():


@pytest.mark.timeout(300) # these can take a longer time than the default timeout
@pytest.mark.trains_model
def test_single_state_featurizer_with_interpreter_state_with_action_listen(
unpacked_trained_moodbot_path: Text,
):
@@ -371,6 +374,7 @@ def test_single_state_featurizer_with_interpreter_state_with_action_listen(


@pytest.mark.timeout(300) # these can take a longer time than the default timeout
@pytest.mark.trains_model
def test_single_state_featurizer_with_interpreter_state_not_with_action_listen(
unpacked_trained_moodbot_path: Text,
):
@@ -408,6 +412,7 @@ def test_single_state_featurizer_with_interpreter_state_not_with_action_listen(


@pytest.mark.timeout(300) # these can take a longer time than the default timeout
@pytest.mark.trains_model
def test_single_state_featurizer_with_interpreter_state_with_no_action_name(
unpacked_trained_moodbot_path: Text,
):
@@ -471,6 +476,7 @@ def test_to_sparse_sentence_features():


@pytest.mark.timeout(300) # these can take a longer time than the default timeout
@pytest.mark.trains_model
def test_single_state_featurizer_uses_regex_interpreter(
unpacked_trained_moodbot_path: Text,
):
1 change: 1 addition & 0 deletions tests/core/policies/test_ted_policy.py
@@ -73,6 +73,7 @@ def test_diagnostics():


class TestTEDPolicy(PolicyTestCollection):
@pytest.mark.trains_model
def test_train_model_checkpointing(self, tmp_path: Path):
model_name = "core-checkpointed-model"
best_model_file = tmp_path / (model_name + ".tar.gz")