Allure test plan support for behave (via allure-framework#531)
sseliverstov authored Dec 4, 2020
1 parent 4fa3e3a commit e262568
Showing 7 changed files with 160 additions and 20 deletions.
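In short: this commit adds Allure test plan support to allure-behave. When the ALLURE_TESTPLAN_PATH environment variable points at a JSON plan, scenarios that no plan entry selects (by an @allure.as_id tag or by fullname) are skipped with a dedicated reason and dropped from the report, and the plan loader moves into allure-python-commons so that allure-pytest shares it. A minimal end-to-end sketch of driving the feature by hand; the file name, selector, and id values are illustrative, while the env var name, JSON shape, and formatter flag all come from this commit:

    import json
    import os
    import subprocess

    plan = {
        "version": "1.0",
        "tests": [
            # hypothetical selector, i.e. "<feature filename>:<scenario name>"
            {"selector": "features/login.feature:Successful login"},
            # matches a scenario tagged @allure.as_id:42
            {"id": "42"},
        ],
    }

    with open("testplan.json", "w") as plan_file:
        json.dump(plan, plan_file)

    env = dict(os.environ, ALLURE_TESTPLAN_PATH="testplan.json")
    subprocess.run(["behave", "-f", "allure_behave.formatter:AllureFormatter"], env=env)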
47 changes: 37 additions & 10 deletions allure-behave/features/steps/behave_steps.py
@@ -1,19 +1,25 @@
 import os
 from tempfile import mkdtemp
 import allure_commons
 from allure_commons_test.report import AllureReport
 from behave.parser import Parser
 from behave.runner import ModelRunner
 from behave.configuration import Configuration
 from behave.formatter._registry import make_formatters
 from behave.formatter.base import StreamOpener
+import threading
+import tempfile
 from contextlib import contextmanager


 @given(u'feature definition')
 @given(u'feature definition {lang}')
 def feature_definition(context, **kwargs):
     parser = Parser(language=kwargs.get('lang', None))
-    context.feature_definition = parser.parse(context.text)
+    feature = parser.parse(context.text)
+    if hasattr(context, "feature_definition"):
+        context.feature_definition.append(feature)
+    else:
+        context.feature_definition = [feature]


 @given(u'hooks implementation')
@@ -22,26 +28,47 @@ def hooks_implementations(context):
     exec(context.text, context.globals)


+@given(u'test plan')
+def test_plan_helper(context):
+    tmp_dir = os.environ.get("TEST_TMP")
+    file, filename = tempfile.mkstemp(suffix=".json", dir=tmp_dir)
+    os.environ["ALLURE_TESTPLAN_PATH"] = filename
+    with os.fdopen(file, 'w') as tmp:
+        tmp.write(context.text)
+    context.test_plan = filename
+
+
 @when(u'I run behave with allure formatter')
 @when(u'I run behave with allure formatter with options "{args}"')
 def run_behave_with_allure(context, **kwargs):
+    def run(context, **kwargs):
         with test_context():
             cmd_args = '-f allure_behave.formatter:AllureFormatter'
             cmd = '{options} {cmd}'.format(cmd=cmd_args, options=kwargs.get('args', ''))
             config = Configuration(command_args=cmd)

             result_tmp_dir = mkdtemp(dir=os.environ.get('TEST_TMP', None))
             stream_opener = StreamOpener(filename=result_tmp_dir)

-            model_runner = ModelRunner(config, [context.feature_definition])
+            model_runner = ModelRunner(config, context.feature_definition)
             model_runner.formatters = make_formatters(config, [stream_opener])
             model_runner.formatters[0].listener.fixture_context.enter()
             model_runner.hooks = getattr(context, 'globals', dict())
             model_runner.run()

             model_runner.formatters[0].listener.__del__()
             context.allure_report = AllureReport(result_tmp_dir)

+    behave_thread = threading.Thread(target=run, args=(context,), kwargs=kwargs)
+    behave_thread.start()
+    behave_thread.join()
+    os.environ.pop("ALLURE_TESTPLAN_PATH", None)


 @contextmanager
 def test_context():
     def _unregister_plugins():
         plugins = []
         for name, plugin in allure_commons.plugin_manager.list_name_plugin():
             allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
             plugins.append(plugin)
         return plugins

     plugins = _unregister_plugins()
     yield
     _unregister_plugins()
     for plugin in plugins:
         allure_commons.plugin_manager.register(plugin)
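A note on test_context() above: allure_commons.plugin_manager is a process-global registry, so plugins registered by an outer session (this suite's own Allure reporting, for instance) would otherwise receive events from the nested behave run. The helper snapshots the registrations, runs the block without them, and restores them afterwards. A slightly hardened sketch of the same pattern; it adds try/finally and restores plugins under their original names, which the committed version skips:

    from contextlib import contextmanager

    import allure_commons


    @contextmanager
    def isolated_plugins():
        # Snapshot and unregister everything currently registered.
        saved = list(allure_commons.plugin_manager.list_name_plugin())
        for name, plugin in saved:
            allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
        try:
            yield
        finally:
            # Drop whatever the nested run registered, then restore the snapshot.
            for name, plugin in allure_commons.plugin_manager.list_name_plugin():
                allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
            for name, plugin in saved:
                allure_commons.plugin_manager.register(plugin, name=name)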
86 changes: 86 additions & 0 deletions allure-behave/features/test_plan.feature
@@ -0,0 +1,86 @@
Feature: Test plan
  Scenario: Select scenarios by fullname
    Given feature definition
      """
      Feature: Test plan example
        Scenario: Scenario with passed step
          Given passed step
        Scenario: Ignored scenario
          Given passed step
      """
    Given feature definition
      """
      Feature: Another Test plan example
        Scenario: Another scenario with passed step
          Given passed step
        Scenario: Another ignored scenario
          Given passed step
      """
    Given test plan
      """
      {
        "version":"1.0",
        "tests": [
          {
            "selector": "<string>:Scenario with passed step"
          },
          {
            "selector": "<string>:Another scenario with passed step"
          }
        ]
      }
      """
    When I run behave with allure formatter
    Then allure report has a scenario with name "Scenario with passed step"
    Then allure report has not a scenario with name "Ignored scenario"
    Then allure report has a scenario with name "Another scenario with passed step"
    Then allure report has not a scenario with name "Another ignored scenario"

  Scenario: Select scenarios by allureid
    Given feature definition
      """
      Feature: Test plan example
        @allure.as_id:1
        Scenario: Scenario with passed step
          Given passed step
        @allure.as_id:2
        Scenario: Ignored scenario
          Given passed step
      """
    Given feature definition
      """
      Feature: Another Test plan example
        @allure.as_id:3
        Scenario: Another scenario with passed step
          Given passed step
        @allure.as_id:4
        Scenario: Another ignored scenario
          Given passed step
      """
    Given test plan
      """
      {
        "version":"1.0",
        "tests": [
          {
            "id": "1"
          },
          {
            "id": "3"
          }
        ]
      }
      """
    When I run behave with allure formatter
    Then allure report has a scenario with name "Scenario with passed step"
    Then allure report has not a scenario with name "Ignored scenario"
    Then allure report has a scenario with name "Another scenario with passed step"
    Then allure report has not a scenario with name "Another ignored scenario"
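The two scenarios above exercise both selection mechanisms: "selector" is compared against the scenario fullname (here "<string>" stands in for the feature file name because the features are parsed from strings), and "id" is compared against the value of an @allure.as_id:<value> tag; a scenario stays in the run if any plan entry matches on either field. For reference, the plan document's shape written out as Python types, a sketch only, since the actual file is plain JSON:

    from typing import List, TypedDict


    class TestPlanItem(TypedDict, total=False):
        id: str        # compared against the @allure.as_id:<value> tag
        selector: str  # compared against "<feature filename>:<scenario name>"


    class TestPlan(TypedDict):
        version: str   # "1.0" in this commit
        tests: List[TestPlanItem]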
5 changes: 5 additions & 0 deletions allure-behave/src/formatter.py
@@ -3,6 +3,8 @@
 import allure_commons
 from allure_commons.logger import AllureFileLogger
 from allure_behave.listener import AllureListener
+from allure_commons.utils import get_testplan
+from allure_behave.utils import is_planned_scenario


 class AllureFormatter(Formatter):
@@ -15,12 +17,15 @@ def __init__(self, stream_opener, config):
         allure_commons.plugin_manager.register(self.listener)
         allure_commons.plugin_manager.register(file_logger)

+        self.testplan = get_testplan()
+
     def _wrap_scenario(self, scenarios):
         for scenario in scenarios:
             if isinstance(scenario, ScenarioOutline):
                 self._wrap_scenario(scenario)
             else:
                 scenario.run = allure_commons.test(scenario.run, context={'scenario': scenario})
+                is_planned_scenario(scenario, self.testplan)

     def feature(self, feature):
         self._wrap_scenario(feature.scenarios)
4 changes: 3 additions & 1 deletion allure-behave/src/listener.py
@@ -22,6 +22,7 @@
 from allure_behave.utils import scenario_links
 from allure_behave.utils import scenario_labels
 from allure_behave.utils import get_fullname
+from allure_behave.utils import TEST_PLAN_SKIP_REASON


 BEFORE_FIXTURES = ['before_all', 'before_tag', 'before_feature', 'before_scenario']
@@ -114,7 +115,8 @@ def stop_test(self, parent_uuid, uuid, name, context, exc_type, exc_val, exc_tb):
         self.stop_scenario(context['scenario'])

     def stop_scenario(self, scenario):
-        if scenario.status == 'skipped' and not self.behave_config.show_skipped:
+        if scenario.status == 'skipped' \
+                and not self.behave_config.show_skipped or scenario.skip_reason == TEST_PLAN_SKIP_REASON:
             self.logger.drop_test(self.current_scenario_uuid)
         else:
             status = scenario_status(scenario)
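One subtlety in the new condition: Python's `and` binds tighter than `or`, so the test groups as "(skipped and skips are hidden) or (skipped by the test plan)", which matches the intent that test-plan skips are dropped from the report even when skipped scenarios are otherwise shown. scenario.skip_reason is what behave records when the formatter calls scenario.skip(...); see utils.py below. The same check, explicitly parenthesized as a hypothetical helper:

    from allure_behave.utils import TEST_PLAN_SKIP_REASON


    def should_drop_result(scenario, behave_config):
        # Mirrors the condition in stop_scenario above, with explicit grouping.
        return (
            (scenario.status == 'skipped' and not behave_config.show_skipped)
            or scenario.skip_reason == TEST_PLAN_SKIP_REASON
        )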
13 changes: 13 additions & 0 deletions allure-behave/src/utils.py
@@ -11,6 +11,7 @@
 from allure_commons.utils import format_exception, format_traceback
 from allure_commons.mapping import parse_tag, labels_set

+TEST_PLAN_SKIP_REASON = "Not in allure test plan"

 STATUS = {
     'passed': Status.PASSED,
@@ -114,3 +115,15 @@ def step_table(step):
     table = [','.join(step.table.headings)]
     [table.append(','.join(list(row))) for row in step.table.rows]
     return '\n'.join(table)
+
+
+def is_planned_scenario(scenario, test_plan):
+    if test_plan:
+        fullname = get_fullname(scenario)
+        labels = scenario_labels(scenario)
+        id_labels = list(filter(lambda label: label.name == LabelType.ID, labels))
+        allure_id = id_labels[0].value if id_labels else None
+        for item in test_plan:
+            if (allure_id and allure_id == item.get("id")) or fullname == item.get("selector"):
+                return
+        scenario.skip(reason=TEST_PLAN_SKIP_REASON)
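Despite its name, is_planned_scenario is not a predicate: it returns None and, as a side effect, skips any scenario that falls outside a non-empty plan (an empty plan means everything runs). The same logic split into a pure predicate plus an explicit apply step, as a sketch with hypothetical names:

    def matches_test_plan(scenario, test_plan):
        # True when any plan entry matches by allure id or by fullname.
        fullname = get_fullname(scenario)
        labels = scenario_labels(scenario)
        id_labels = [label for label in labels if label.name == LabelType.ID]
        allure_id = id_labels[0].value if id_labels else None
        return any(
            (allure_id and allure_id == item.get("id")) or fullname == item.get("selector")
            for item in test_plan
        )


    def apply_test_plan(scenario, test_plan):
        if test_plan and not matches_test_plan(scenario, test_plan):
            scenario.skip(reason=TEST_PLAN_SKIP_REASON)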
11 changes: 2 additions & 9 deletions allure-pytest/src/plugin.py
@@ -3,11 +3,10 @@
 import allure
 import allure_commons
 import os
-import json

 from allure_commons.types import LabelType
 from allure_commons.logger import AllureFileLogger
-
+from allure_commons.utils import get_testplan

 from allure_pytest.utils import allure_label, allure_labels, allure_full_name
 from allure_pytest.helper import AllureTestHelper
@@ -148,13 +147,7 @@ def select_by_labels(items, config):


 def select_by_testcase(items):
-    planned_tests = []
-    file_path = os.environ.get("ALLURE_TESTPLAN_PATH")
-
-    if file_path:
-        with open(file_path, 'r') as plan_file:
-            plan = json.load(plan_file)
-            planned_tests = plan.get("tests", [])
+    planned_tests = get_testplan()

     if planned_tests:

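The remainder of select_by_testcase is collapsed in this view. Conceptually, when planned_tests is non-empty it keeps the collected items whose allure id or full test name matches a plan entry and deselects the rest. A sketch of that kind of filter, assuming allure_label(item, LabelType.ID) yields the item's id label values; this is an illustration, not the verbatim collapsed code:

    def select_planned(items, planned_tests, config):
        selected, deselected = [], []
        planned_ids = {entry.get("id") for entry in planned_tests}
        planned_names = {entry.get("selector") for entry in planned_tests}
        for item in items:
            ids = set(allure_label(item, LabelType.ID))
            if (ids & planned_ids) or allure_full_name(item) in planned_names:
                selected.append(item)
            else:
                deselected.append(item)
        config.hook.pytest_deselected(items=deselected)
        items[:] = selected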
14 changes: 14 additions & 0 deletions allure-python-commons/src/utils.py
@@ -5,13 +5,15 @@
 import six
 import time
 import uuid
+import json
 import socket
 import inspect
 import hashlib
 import platform
 import threading
 import traceback
 import collections
+
 from functools import partial
@@ -387,3 +389,15 @@ def format_exception(etype, value):
     "AssertionError: \\nExpected:...but:..."
     """
     return '\n'.join(format_exception_only(etype, value)) if etype or value else None
+
+
+def get_testplan():
+    planned_tests = []
+    file_path = os.environ.get("ALLURE_TESTPLAN_PATH")
+
+    if file_path:
+        with open(file_path, 'r') as plan_file:
+            plan = json.load(plan_file)
+            planned_tests = plan.get("tests", [])
+
+    return planned_tests
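get_testplan is deliberately forgiving: with ALLURE_TESTPLAN_PATH unset it returns an empty list, which every caller treats as "no plan, run everything". A quick usage sketch; the path is illustrative:

    import os

    from allure_commons.utils import get_testplan

    os.environ["ALLURE_TESTPLAN_PATH"] = "testplan.json"  # hypothetical path
    for entry in get_testplan():
        # each entry is a dict carrying an "id" and/or "selector" key
        print(entry.get("id"), entry.get("selector"))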
