13 changes: 13 additions & 0 deletions README.md
@@ -67,6 +67,19 @@ If the command completes successfully, you can run the verification tool:
nbsapi_verify --config-dir ~
```

You can also generate JSON and HTML reports of the test results:

```shell
# Generate default JSON report (nbsapi_verify_report.json)
nbsapi_verify --config-dir ~ --json-output

# Generate default HTML report (nbsapi_verify_report.html)
nbsapi_verify --config-dir ~ --html-output

# Generate both reports
nbsapi_verify --config-dir ~ --json-output --html-output
```
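
For reference, the JSON report mirrors the dictionary built by `format_json` (the values and endpoints below are illustrative; failed entries are objects with `id` and `error` fields):

```json
{
  "summary": {
    "total": 12,
    "passed": 11,
    "failed": 0,
    "skipped": 1,
    "duration": 3.4,
    "timestamp": "10:32AM UTC on Apr 01, 2025"
  },
  "tests": {
    "passed": ["/api/v1/treatments"],
    "failed": [],
    "skipped": ["/api/v1/users"]
  },
  "is_conformant": true
}
```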

When all tests pass, your API implementation is conformant to the `NbsAPI` specification!

## Help
1 change: 1 addition & 0 deletions pyproject.toml
@@ -8,6 +8,7 @@ requires-python = ">=3.9"
dependencies = [
"click>=8.1.8",
"tavern>=2.11.0",
"jinja2>=3.0.0",
]
authors = [
{ name = "Stephan Hügel", email = "urschrei@gmail.com" },
82 changes: 69 additions & 13 deletions src/nbsapi_verify/cli.py
@@ -1,5 +1,7 @@
# ruff: noqa: UP007

import json
import os
import sys
from enum import Enum
from pathlib import Path
@@ -9,7 +11,7 @@
import pytest
import yaml

from .formatting import ResultCapture, format_results
from .formatting import ResultCapture, format_json, format_results, render_html


class TestType(str, Enum):
@@ -94,6 +96,22 @@ def get_config_path(config_dir: Optional[str] = None) -> Path:
default="all",
help="Type of tests to run",
)
@click.option(
"--json-output",
type=click.Path(),
default=None,
# Optional-value flag: bare `--json-output` yields flag_value (""),
# `--json-output PATH` yields the path. This requires is_flag=False;
# with is_flag=True the option takes no value and a path could never be passed.
is_flag=False,
flag_value="",
help="Output test results as JSON",
)
@click.option(
"--html-output",
type=click.Path(),
default=None,
is_flag=False,
flag_value="",
help="Output test results as HTML",
)
def cli(
generate: bool,
config_dir: Optional[str],
@@ -103,8 +121,10 @@ def cli(
password: Optional[str],
solution: int,
test_type: str,
json_output: Optional[str],
html_output: Optional[str],
):
"""Tavern test runner and configuration generator."""
"""NbSAPI test runner and configuration generator."""
if generate:
if not host:
click.echo("Error: --host is required when using --generate", err=True)
@@ -148,14 +168,16 @@ def cli(
err=True,
)
sys.exit(1)

# Load config to check available test types
with open(config_path) as f:
config = yaml.safe_load(f)

# Check for test type mismatch
has_auth_config = "username" in config.get("variables", {}) and "password" in config.get("variables", {})

has_auth_config = "username" in config.get(
"variables", {}
) and "password" in config.get("variables", {})

# Detect test type mismatch
if test_type in (TestType.AUTH, TestType.ALL) and not has_auth_config:
click.echo(
@@ -176,22 +198,22 @@

# Verify that requested test types have matching test files

# Get all test files and check for their markers
# (Path.glob replaces the function-local `import glob`)
test_files = list(test_dir.glob("*.tavern.yaml"))

# Check if there are any test files with requested markers
has_auth_tests = False
has_public_tests = False

for test_file in test_files:
with open(test_file) as f:
content = f.read()
if "marks:\n- auth" in content:
has_auth_tests = True
if "marks:\n- public" in content:
has_public_tests = True
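# NB: the substring match assumes the generated files serialise marks
# as a YAML block sequence, i.e.:
#   marks:
#   - auth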

# Verify requested test type has matching test files
if test_type == TestType.AUTH and not has_auth_tests:
click.echo(
@@ -200,15 +222,15 @@
err=True,
)
sys.exit(1)

if test_type == TestType.PUBLIC and not has_public_tests:
click.echo(
f"Error: Test type '{test_type}' requested but no public test files found.\n"
"Make sure public test files are generated and properly marked.",
err=True,
)
sys.exit(1)

if test_type == TestType.ALL and not (has_auth_tests and has_public_tests):
missing = []
if not has_auth_tests:
@@ -242,9 +264,43 @@
# Run pytest with capture
exit_code = pytest.main(pytest_args, plugins=[capture])

# Print formatted results
# Print formatted results to terminal
click.echo(format_results(capture))

# Handle JSON output if requested
if json_output is not None:
# Use config_dir if provided, otherwise use current directory
base_dir = os.path.curdir if not config_dir else config_dir

# If a path was explicitly provided, use it; otherwise use the default filename
if json_output:
json_path = json_output
else:
json_path = os.path.join(base_dir, "nbsapi_verify_report.json")

json_data = format_json(capture)
with open(json_path, "w") as f:
json.dump(json_data, f, indent=2)
click.echo(f"JSON report saved to: {json_path}")

# Handle HTML output if requested
if html_output is not None:
# Use config_dir if provided, otherwise use current directory
base_dir = os.path.curdir if not config_dir else config_dir

# If a path was explicitly provided, use it; otherwise use the default filename
if html_output:
html_path = html_output
else:
html_path = os.path.join(base_dir, "nbsapi_verify_report.html")

# render_html builds the JSON payload itself when none is supplied
html_content = render_html(capture)
with open(html_path, "w") as f:
f.write(html_content)
click.echo(f"HTML report saved to: {html_path}")

sys.exit(exit_code)


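The report options use click's optional-value pattern (`is_flag=False` together with `flag_value`), so each flag may be given bare or with an explicit path. A minimal standalone sketch of that behaviour (the `demo` command is hypothetical, not part of this PR):

```python
import click


@click.command()
@click.option(
    "--json-output",
    type=click.Path(),
    default=None,   # option absent entirely -> None
    is_flag=False,  # bare `--json-output` -> flag_value (""); `--json-output PATH` -> PATH
    flag_value="",
)
def demo(json_output):
    # Mirrors the dispatch in cli(): None = no report,
    # "" = default filename, anything else = explicit path.
    if json_output is None:
        click.echo("no JSON report requested")
    elif json_output == "":
        click.echo("JSON report at the default filename")
    else:
        click.echo(f"JSON report at {json_output}")


if __name__ == "__main__":
    demo()
```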
71 changes: 67 additions & 4 deletions src/nbsapi_verify/formatting.py
@@ -1,3 +1,8 @@
import datetime as dt
import os
import re
from datetime import datetime
from typing import Any, Optional

from _pytest.reports import TestReport


@@ -7,23 +12,39 @@ def __init__(self):
self.failed: list[dict[str, str]] = []
self.skipped: list[str] = []
self.total_duration: float = 0.0
self.start_time: datetime = datetime.now(tz=dt.timezone.utc)

def pytest_runtest_logreport(self, report: TestReport):
if report.when == "call": # Only process the test result, not setup/teardown
test_id = report.nodeid

# Extract endpoint from test_id
# Format is typically: path/to/test_name_endpoint.tavern.yaml::test_name
# We want to extract the endpoint part (/contact, /v1/api/users, etc.)
endpoint_match = re.search(r"test_.+?_(.+?)\.tavern\.yaml", test_id)
if endpoint_match:
# Replace underscores with slashes to reconstruct the API endpoint
endpoint = endpoint_match.group(1).replace("_", "/")
# Add leading slash for clarity
endpoint = f"/{endpoint}"
else:
# Fallback to original test_id if pattern not found
endpoint = test_id
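# e.g. a (hypothetical) "tests/test_get_api_v1_users.tavern.yaml::stage"
# captures "api_v1_users" and yields "/api/v1/users"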

if report.passed:
self.passed.append(test_id)
self.passed.append(endpoint)
elif report.failed:
self.failed.append(
{
"id": test_id,
"id": endpoint,
"error": str(report.longrepr)
if report.longrepr
else "No error details available",
}
)
elif report.skipped:
self.skipped.append(test_id)
self.skipped.append(endpoint)

if hasattr(report, "duration"):
self.total_duration += report.duration
@@ -35,7 +56,7 @@ def format_results(capture: ResultCapture) -> str:
failures = False

# Summary line
total = len(capture.passed) + len(capture.failed) + len(capture.skipped)
_ = len(capture.passed) + len(capture.failed) + len(capture.skipped)
output.append(f"\nNbSAPI Conformance Test Summary ({capture.total_duration:.1f}s)")
output.append("=" * 40)

@@ -74,3 +95,45 @@
)

return "\n".join(output)


def format_json(capture: ResultCapture) -> dict[str, Any]:
"""Format test results as a JSON-serializable dictionary."""
total = len(capture.passed) + len(capture.failed) + len(capture.skipped)

return {
"summary": {
"total": total,
"passed": len(capture.passed),
"failed": len(capture.failed),
"skipped": len(capture.skipped),
"duration": round(capture.total_duration, 1),
"timestamp": capture.start_time.strftime("%l:%M%p %Z on %b %d, %Y"),
},
"tests": {
"passed": [test_id for test_id in capture.passed],
"failed": capture.failed,
"skipped": [test_id for test_id in capture.skipped],
},
"is_conformant": len(capture.failed) == 0,
}


def render_html(
capture: ResultCapture, json_data: Optional[dict[str, Any]] = None
) -> str:
"""Render test results as HTML using a Jinja2 template."""
from jinja2 import Template

# Use the JSON data if provided, otherwise generate it
data = json_data if json_data is not None else format_json(capture)

# Get the HTML template
template_path = os.path.join(os.path.dirname(__file__), "templates", "report.html")

with open(template_path) as f:
template_content = f.read()

# Render the template
template = Template(template_content)
return template.render(data=data)
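
Taken together, a minimal end-to-end use of these helpers might look like this (a sketch only; the test path is hypothetical):

```python
import json

import pytest

from nbsapi_verify.formatting import ResultCapture, format_json, render_html

# Run the suite under a ResultCapture plugin, then emit both reports.
capture = ResultCapture()
pytest.main(["path/to/generated/tests"], plugins=[capture])

with open("nbsapi_verify_report.json", "w") as f:
    json.dump(format_json(capture), f, indent=2)

with open("nbsapi_verify_report.html", "w") as f:
    f.write(render_html(capture))
```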