Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 11 additions & 9 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -186,19 +186,21 @@ You can use environment variable to control certain features of testomat.io


#### Test Run configuration
| Env variable | What it does | Examples |
|--------------------------|----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
| TESTOMATIO_TITLE | Name of a test run to create on testomat.io | TESTOMATIO_TITLE="Nightly Smoke Tests" pytest --testomatio report |
| TESTOMATIO_RUN_ID | Id of existing test run to use for sending test results to | TESTOMATIO_RUN_ID=98dfas0 pytest --testomatio report |
| TESTOMATIO_RUNGROUP_TITLE | Create a group (folder) for a test run. If group already exists, attach test run to it | TESTOMATIO_RUNGROUP_TITLE="Release 2.0" pytest --testomatio report |
| TESTOMATIO_ENV | Assign environment to a test run, env variant of **testRunEnv** option. Has a lower precedence than **testRunEnv** option. | TESTOMATIO_ENV="linux,chrome,1920x1080" pytest --testomatio report |
| TESTOMATIO_LABEL | Assign labels to a test run. Labels must exist in project and their scope must be enabled for runs | TESTOMATIO_LABEL="smoke,regression" pytest --testomatio report |
| TESTOMATIO_UPDATE_CODE | Send code of your test to Testomat.io on each run. If not enabled(default) assumes the code is pushed using **sync** command | TESTOMATIO_UPDATE_CODE=True pytest --testomatio report |
| TESTOMATIO_EXCLUDE_SKIPPED | Exclude skipped tests from the report | TESTOMATIO_EXCLUDE_SKIPPED=1 pytest --testomatio report |
| Env variable | What it does | Examples |
|--------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------|
| TESTOMATIO_TITLE | Name of a test run to create on testomat.io | TESTOMATIO_TITLE="Nightly Smoke Tests" pytest --testomatio report |
| TESTOMATIO_RUN_ID | Id of existing test run to use for sending test results to | TESTOMATIO_RUN_ID=98dfas0 pytest --testomatio report |
| TESTOMATIO_RUNGROUP_TITLE | Create a group (folder) for a test run. If group already exists, attach test run to it | TESTOMATIO_RUNGROUP_TITLE="Release 2.0" pytest --testomatio report |
| TESTOMATIO_ENV | Assign environment to a test run, env variant of **testRunEnv** option. Has a lower precedence than **testRunEnv** option. | TESTOMATIO_ENV="linux,chrome,1920x1080" pytest --testomatio report |
| TESTOMATIO_LABEL | Assign labels to a test run. Labels must exist in project and their scope must be enabled for runs | TESTOMATIO_LABEL="smoke,regression" pytest --testomatio report |
| TESTOMATIO_UPDATE_CODE | Send code of your test to Testomat.io on each run. If not enabled(default) assumes the code is pushed using **sync** command | TESTOMATIO_UPDATE_CODE=True pytest --testomatio report |
| TESTOMATIO_EXCLUDE_SKIPPED | Exclude skipped tests from the report | TESTOMATIO_EXCLUDE_SKIPPED=1 pytest --testomatio report |
| TESTOMATIO_PUBLISH | Publish run after reporting and provide a public URL | TESTOMATIO_PUBLISH=true pytest --testomatio report |
| TESTOMATIO_PROCEED | Do not finalize the run | TESTOMATIO_PROCEED=1 pytest --testomatio report |
| TESTOMATIO_SHARED_RUN | Report parallel execution to the same run matching it by title. If the run was created more than 20 minutes ago, a new run will be created instead. | TESTOMATIO_TITLE="Run1" TESTOMATIO_SHARED_RUN=1 pytest --testomatio report |
| TESTOMATIO_SHARED_RUN_TIMEOUT | Changes the timeout of a shared run. After the timeout, the shared run won't accept other runs with the same name, and new runs will be created instead. Timeout is set in minutes; the default is 20 minutes. | TESTOMATIO_TITLE="Run1" TESTOMATIO_SHARED_RUN=1 TESTOMATIO_SHARED_RUN_TIMEOUT=10 pytest --testomatio report |
| TESTOMATIO_DISABLE_BATCH_UPLOAD | Disables batch uploading and uploads each test result one by one. | TESTOMATIO_DISABLE_BATCH_UPLOAD=True pytest --testomatio report |
| TESTOMATIO_BATCH_SIZE | Changes size of batch for batch uploading. Default is 50. Maximum is 100. | TESTOMATIO_BATCH_SIZE=15 pytest --testomatio report |


#### S3 Bucket configuration
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ version_provider = "pep621"
update_changelog_on_bump = false
[project]
name = "pytestomatio"
version = "2.10.2"
version = "2.10.3b1"


dependencies = [
Expand Down
32 changes: 32 additions & 0 deletions pytestomatio/connect/connector.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,38 @@ def update_test_status(self, run_id: str,
if response.status_code == 200:
log.info('Test status updated')

def batch_tests_upload(self, run_id: str,
                       batch_size: int,
                       tests: list) -> None:
    """Report test results into a test run in batches.

    Splits *tests* into chunks of at most *batch_size* items and POSTs each
    chunk to the reporter endpoint, tagging every request with a 1-based
    ``batch_index`` so the server can assemble partial uploads in order.

    :param run_id: id of the test run to report results into
    :param batch_size: maximum number of test results per request
    :param tests: list of test-result payloads (dicts) to upload
    """
    # TODO: add retry logic
    if not tests:
        log.info('No tests to report. Report skipped')
        return

    try:
        log.info(f'Starting batch test report into test run. Run id: {run_id}, number of tests: {len(tests)}, '
                 f'batch size: {batch_size}')
        for i in range(0, len(tests), batch_size):
            batch = tests[i:i + batch_size]
            batch_index = i // batch_size + 1
            request = {
                'tests': batch,
                'batch_index': batch_index
            }
            response = self.session.post(f'{self.base_url}/api/reporter/{run_id}/testrun?api_key={self.api_key}',
                                         json=request)
            if response.status_code == 200:
                log.info(f'Tests status updated. Batch index: {batch_index}')
            else:
                # Surface server-side rejections instead of silently dropping the
                # batch; for 4xx/5xx this raises and feeds the HTTPError handler
                # below (previously that handler was unreachable).
                response.raise_for_status()
    except ConnectionError as ce:
        log.error(f'Failed to connect to {self.base_url}: {ce}')
    except HTTPError as he:
        log.error(f'HTTP error occurred while connecting to {self.base_url}: {he}')
    except Exception as e:
        log.error(f'An unexpected exception occurred. Please report an issue: {e}')

# TODO: I guess this class should be just an API client and used within testRun (testRunConfig)
def finish_test_run(self, run_id: str, is_final=False) -> None:
status_event = 'finish_parallel' if is_final else 'finish'
Expand Down
33 changes: 33 additions & 0 deletions pytestomatio/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,8 @@ def pytest_runtest_logfinish(nodeid, location):
return
elif not pytest.testomatio.test_run_config.test_run_id:
return
if not pytest.testomatio.test_run_config.disable_batch:
return

for nodeid, request in pytest.testomatio.test_run_config.status_request.items():
if request['status']:
Expand All @@ -236,6 +238,37 @@ def pytest_runtest_logfinish(nodeid, location):
pytest.testomatio.test_run_config.status_request = {}


@pytest.hookimpl(optionalhook=True)
def pytest_testnodedown(node, error):
    """xdist hook: merge a finished worker's test results into the master run.

    Workers stash their per-test status payloads in ``workeroutput`` during
    ``pytest_sessionfinish``; the master collects them here so the whole
    distributed session can be reported in one batch upload.
    """
    # Only act on xdist workers while the plugin runs in 'report' mode.
    # A None option value already fails the != 'report' comparison, so no
    # separate None check is needed.
    if not hasattr(node, 'workeroutput') or not hasattr(pytest, 'testomatio') \
            or node.config.getoption(testomatio) != 'report':
        return
    # Per-test reporting path: results were already sent individually.
    if pytest.testomatio.test_run_config.disable_batch:
        return

    log.info(f"Collecting test results from worker '{node.workerinfo.get('id')}'")
    worker_results = node.workeroutput.get('testrun_results', {})
    pytest.testomatio.test_run_config.status_request.update(worker_results)
    log.info(f"{len(worker_results)} test results added to the master test run")


def pytest_sessionfinish(session, exitstatus):
    """At session end, upload accumulated test results in batches.

    In an xdist worker process the results are handed to the master via
    ``workeroutput`` (collected in ``pytest_testnodedown``); in the master
    process (or a non-distributed run) they are uploaded directly.
    """
    # A None option value already fails the != 'report' comparison, so no
    # separate None check is needed.
    if not hasattr(pytest, 'testomatio') or session.config.getoption(testomatio) != 'report':
        return

    run: TestRunConfig = pytest.testomatio.test_run_config
    if run.disable_batch:
        # Batch upload disabled: results were reported one by one already.
        return

    if hasattr(session.config, 'workerinput'):
        # xdist worker process - write test results to worker output.
        # They will be reported from the master process.
        session.config.workeroutput['testrun_results'] = run.status_request
        return

    pytest.testomatio.connector.batch_tests_upload(run.test_run_id, run.batch_size,
                                                   list(run.status_request.values()))


def pytest_unconfigure(config: Config):
if not hasattr(pytest, 'testomatio') or config.getoption(testomatio) is None:
return
Expand Down
5 changes: 5 additions & 0 deletions pytestomatio/testomatio/testRunConfig.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,18 +5,23 @@
from typing import Optional

TESTOMATIO_TEST_RUN_LOCK_FILE = ".testomatio_test_run_id_lock"
DEFAULT_BATCH_SIZE = 50

class TestRunConfig:
def __init__(self):
run_id = os.environ.get('TESTOMATIO_RUN_ID') or os.environ.get('TESTOMATIO_RUN')
title = os.environ.get('TESTOMATIO_TITLE') if os.environ.get('TESTOMATIO_TITLE') else 'test run at ' + dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
disable_batch_upload = os.environ.get('TESTOMATIO_DISABLE_BATCH_UPLOAD') in ['True', 'true', '1']
batch_size = os.environ.get('TESTOMATIO_BATCH_SIZE', '')
shared_run = os.environ.get('TESTOMATIO_SHARED_RUN') in ['True', 'true', '1']
update_code = os.environ.get('TESTOMATIO_UPDATE_CODE', False) in ['True', 'true', '1']
exclude_skipped = os.environ.get('TESTOMATIO_EXCLUDE_SKIPPED', False) in ['True', 'true', '1']
shared_run_timeout = os.environ.get('TESTOMATIO_SHARED_RUN_TIMEOUT', '')
self.access_event = 'publish' if os.environ.get("TESTOMATIO_PUBLISH") else None
self.test_run_id = run_id
self.title = title
self.disable_batch = disable_batch_upload
self.batch_size = int(batch_size) if (batch_size.isdigit() and int(batch_size) <= 100) else DEFAULT_BATCH_SIZE
self.environment = safe_string_list(os.environ.get('TESTOMATIO_ENV'))
self.exclude_skipped = exclude_skipped
self.label = safe_string_list(os.environ.get('TESTOMATIO_LABEL'))
Expand Down
18 changes: 18 additions & 0 deletions tests/test_connect/test_connector.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,6 +285,24 @@ def test_update_test_status_success(self, mock_post, connector):
assert payload['overwrite'] is True
assert 'message' not in payload

@patch('requests.Session.post')
def test_batch_upload_success(self, mock_post, connector):
    """Batch upload splits 100 tests into two batches of 50 and POSTs each."""
    mock_response = Mock()
    mock_response.status_code = 200
    mock_post.return_value = mock_response

    tests = [{} for _ in range(100)]
    batch_size = 50
    run_id = 'AS23Fd'

    connector.batch_tests_upload(run_id, batch_size, tests)

    # Integer expectation (len // batch) rather than comparing against a
    # float from true division: 100 tests / 50 per batch -> exactly 2 POSTs.
    assert mock_post.call_count == len(tests) // batch_size
    call_args = mock_post.call_args

    assert f'{connector.base_url}/api/reporter/{run_id}/testrun' in call_args[0][0]

@patch('requests.Session.post')
def test_update_test_status_filters_none_values(self, mock_post, connector):
"""Test update test status filters keys with none value"""
Expand Down
42 changes: 40 additions & 2 deletions tests/test_testomatio/test_testRunConfig.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import os
from unittest.mock import patch, mock_open

from pytestomatio.testomatio.testRunConfig import TestRunConfig, TESTOMATIO_TEST_RUN_LOCK_FILE
from pytestomatio.testomatio.testRunConfig import TestRunConfig, TESTOMATIO_TEST_RUN_LOCK_FILE, DEFAULT_BATCH_SIZE


class TestTestRunConfig:
Expand All @@ -21,6 +21,8 @@ def test_init_default_values(self):
assert config.title == "test run at 2024-01-15 10:30:45"
assert config.environment is None
assert config.exclude_skipped is False
assert config.disable_batch is False
assert config.batch_size == DEFAULT_BATCH_SIZE
assert config.label is None
assert config.group_title is None
assert config.parallel is True
Expand All @@ -40,7 +42,9 @@ def test_init_with_env_variables(self):
'TESTOMATIO_RUNGROUP_TITLE': 'Release 2.0',
'TESTOMATIO_UPDATE_CODE': '1',
'TESTOMATIO_PUBLISH': '1',
'TESTOMATIO_EXCLUDE_SKIPPED': '1'
'TESTOMATIO_EXCLUDE_SKIPPED': '1',
'TESTOMATIO_DISABLE_BATCH_UPLOAD': 'True',
'TESTOMATIO_BATCH_SIZE': '12'
}

with patch.dict(os.environ, env_vars, clear=True):
Expand All @@ -51,6 +55,8 @@ def test_init_with_env_variables(self):
assert config.title == 'Custom Test Run'
assert config.environment == 'linux,browser:chrome,1920x1080'
assert config.exclude_skipped is True
assert config.disable_batch is True
assert config.batch_size == 12
assert config.label == 'smoke,regression'
assert config.group_title == 'Release 2.0'
assert config.parallel is True
Expand Down Expand Up @@ -110,6 +116,38 @@ def test_init_exclude_skipped_false_variations(self, value):

assert config.exclude_skipped is False

@pytest.mark.parametrize('value', ['True', 'true', '1'])
def test_init_disable_batch_upload_true_variations(self, value):
    """Truthy strings for TESTOMATIO_DISABLE_BATCH_UPLOAD disable batch upload."""
    env = {'TESTOMATIO_DISABLE_BATCH_UPLOAD': value}
    with patch.dict(os.environ, env, clear=True):
        assert TestRunConfig().disable_batch is True

@pytest.mark.parametrize('value', ['False', 'false', '0', 'anything'])
def test_init_disable_batch_upload_false_variations(self, value):
    """Non-truthy strings for TESTOMATIO_DISABLE_BATCH_UPLOAD keep batch upload on."""
    env = {'TESTOMATIO_DISABLE_BATCH_UPLOAD': value}
    with patch.dict(os.environ, env, clear=True):
        assert TestRunConfig().disable_batch is False

@pytest.mark.parametrize('value', ['1', '10', '11'])
def test_init_batch_size_true_variations(self, value):
    """In-range numeric TESTOMATIO_BATCH_SIZE values are used as given."""
    env = {'TESTOMATIO_BATCH_SIZE': value}
    with patch.dict(os.environ, env, clear=True):
        assert TestRunConfig().batch_size == int(value)

@pytest.mark.parametrize('value', ['False', 'false', '101', 'anything'])
def test_init_batch_size_false_variations(self, value):
    """Non-numeric or out-of-range TESTOMATIO_BATCH_SIZE falls back to the default."""
    env = {'TESTOMATIO_BATCH_SIZE': value}
    with patch.dict(os.environ, env, clear=True):
        assert TestRunConfig().batch_size == DEFAULT_BATCH_SIZE

def test_to_dict_full_data(self):
"""Test to_dict with full data"""
env_vars = {
Expand Down
Loading