Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 24 additions & 2 deletions src/agentready/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,12 @@

import click

try:
from importlib.metadata import version as get_version
except ImportError:
# Python 3.7 compatibility
from importlib_metadata import version as get_version

from ..assessors.code_quality import (
CyclomaticComplexityAssessor,
TypeAnnotationsAssessor,
Expand All @@ -29,6 +35,19 @@
from .bootstrap import bootstrap
from .demo import demo
from .learn import learn
from .repomix import repomix_generate
Comment on lines 35 to +38

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Remove import of nonexistent repomix CLI command

The CLI module now imports repomix_generate from .repomix, but there is no src/agentready/cli/repomix.py in the package. Because the import is executed at module load time, any agentready invocation now raises ModuleNotFoundError: No module named 'agentready.cli.repomix' before parsing arguments, effectively breaking the CLI. Unless a repomix command is added, this import should be dropped or gated.

Useful? React with 👍 / 👎.



def get_agentready_version() -> str:
    """Return the installed AgentReady version from package metadata.

    Returns:
        Version string (e.g., "1.0.0"), or "unknown" when the package
        metadata cannot be read (e.g., running from a source checkout
        that was never installed).
    """
    try:
        resolved = get_version("agentready")
    except Exception:
        # Any metadata failure (PackageNotFoundError etc.) degrades to a
        # sentinel rather than breaking the CLI.
        return "unknown"
    return resolved


def create_all_assessors():
Expand Down Expand Up @@ -152,7 +171,8 @@ def run_assessment(repository_path, verbose, output_dir, config_path):

# Run scan
try:
assessment = scanner.scan(assessors, verbose=verbose)
version = get_agentready_version()
assessment = scanner.scan(assessors, verbose=verbose, version=version)
except Exception as e:
click.echo(f"Error during assessment: {str(e)}", err=True)
if verbose:
Expand Down Expand Up @@ -286,11 +306,13 @@ def generate_config():
cli.add_command(bootstrap)
cli.add_command(demo)
cli.add_command(learn)
cli.add_command(repomix_generate)


def show_version():
    """Show version information.

    Prints the AgentReady version resolved from package metadata
    (instead of a hard-coded literal, which drifted from the real
    release number) plus the bundled research-report note.
    """
    version = get_agentready_version()
    click.echo(f"AgentReady Repository Scorer v{version}")
    click.echo("Research Report: bundled")


Expand Down
2 changes: 2 additions & 0 deletions src/agentready/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@
from agentready.models.config import Config
from agentready.models.discovered_skill import DiscoveredSkill
from agentready.models.finding import Finding
from agentready.models.metadata import AssessmentMetadata
from agentready.models.repository import Repository

__all__ = [
"Assessment",
"AssessmentMetadata",
"Attribute",
"Citation",
"Config",
Expand Down
4 changes: 4 additions & 0 deletions src/agentready/models/assessment.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from .config import Config
from .discovered_skill import DiscoveredSkill
from .finding import Finding
from .metadata import AssessmentMetadata
from .repository import Repository


Expand All @@ -25,6 +26,7 @@ class Assessment:
config: Custom configuration used (if any)
duration_seconds: Time taken for assessment
discovered_skills: Patterns extracted from this assessment (optional)
metadata: Execution context (version, user, command, timestamp)
"""

repository: Repository
Expand All @@ -38,6 +40,7 @@ class Assessment:
config: Config | None
duration_seconds: float
discovered_skills: list[DiscoveredSkill] = field(default_factory=list)
metadata: AssessmentMetadata | None = None

VALID_LEVELS = {"Platinum", "Gold", "Silver", "Bronze", "Needs Improvement"}

Expand Down Expand Up @@ -70,6 +73,7 @@ def __post_init__(self):
def to_dict(self) -> dict:
"""Convert to dictionary for JSON serialization."""
return {
"metadata": self.metadata.to_dict() if self.metadata else None,
"repository": self.repository.to_dict(),
"timestamp": self.timestamp.isoformat(),
"overall_score": self.overall_score,
Expand Down
88 changes: 88 additions & 0 deletions src/agentready/models/metadata.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
"""Assessment metadata model for execution context and reproducibility."""

import getpass
import os
import socket
from dataclasses import dataclass
from datetime import datetime


@dataclass
class AssessmentMetadata:
    """Metadata about the assessment execution context.

    Captures who ran the assessment, when, with what version, and what
    command. Critical for reproducibility, debugging, and
    multi-repository workflows.

    Attributes:
        agentready_version: Version of AgentReady used (e.g., "1.0.0")
        assessment_timestamp: ISO 8601 timestamp of when assessment started
        assessment_timestamp_human: Human-readable timestamp
            (e.g., "November 21, 2025 at 2:11 AM")
        executed_by: Username and hostname (e.g., "jeder@macbook")
        command: Full CLI command executed (e.g., "agentready assess . --verbose")
        working_directory: Absolute path of current working directory when executed
    """

    agentready_version: str
    assessment_timestamp: str  # ISO 8601 format
    assessment_timestamp_human: str
    executed_by: str
    command: str
    working_directory: str

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON serialization."""
        return {
            "agentready_version": self.agentready_version,
            "assessment_timestamp": self.assessment_timestamp,
            "assessment_timestamp_human": self.assessment_timestamp_human,
            "executed_by": self.executed_by,
            "command": self.command,
            "working_directory": self.working_directory,
        }

    @classmethod
    def create(
        cls, version: str, timestamp: datetime, command: str
    ) -> "AssessmentMetadata":
        """Create metadata from execution context.

        Args:
            version: AgentReady version string
            timestamp: Assessment start time
            command: CLI command executed

        Returns:
            AssessmentMetadata instance
        """
        # Username and hostname are best-effort: metadata collection must
        # never abort an assessment, so fall back to "unknown".
        try:
            username = getpass.getuser()
        except Exception:
            username = "unknown"

        try:
            hostname = socket.gethostname().split(".")[0]  # Short hostname
        except Exception:
            hostname = "unknown"

        executed_by = f"{username}@{hostname}"

        # Format timestamps. Avoid the POSIX-only "%-I" strftime flag:
        # on Windows it raises ValueError ("Invalid format string"), which
        # would crash every assessment there. Build the non-padded 12-hour
        # value portably instead ("%I" is zero-padded on all platforms).
        iso_timestamp = timestamp.isoformat()
        hour_12 = int(timestamp.strftime("%I"))  # drops the leading zero
        human_timestamp = (
            f"{timestamp.strftime('%B %d, %Y')} at "
            f"{hour_12}:{timestamp.strftime('%M %p')}"
        )

        # Working directory, also best-effort (getcwd can fail if the
        # directory was removed underneath the process).
        try:
            working_dir = os.getcwd()
        except Exception:
            working_dir = "unknown"

        return cls(
            agentready_version=version,
            assessment_timestamp=iso_timestamp,
            assessment_timestamp_human=human_timestamp,
            executed_by=executed_by,
            command=command,
            working_directory=working_dir,
        )
1 change: 1 addition & 0 deletions src/agentready/reporters/html.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ def generate(self, assessment: Assessment, output_path: Path) -> Path:
"findings": assessment.findings,
"duration_seconds": assessment.duration_seconds,
"config": assessment.config,
"metadata": assessment.metadata,
# Embed assessment JSON for JavaScript
"assessment_json": json.dumps(assessment.to_dict()),
}
Expand Down
33 changes: 24 additions & 9 deletions src/agentready/reporters/markdown.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,17 +62,32 @@ def generate(self, assessment: Assessment, output_path: Path) -> Path:
return output_path

def _generate_header(self, assessment: Assessment) -> str:
    """Generate report header with repository info and metadata.

    Reconstructs the post-merge implementation: the span interleaved
    stale pre-merge lines (old table-based header) with the new
    line-per-field header, which are dropped here.

    Args:
        assessment: Completed assessment to summarize.

    Returns:
        Markdown header string ending with a horizontal rule.
    """
    header = "# 🤖 AgentReady Assessment Report\n\n"

    # Repository information
    header += f"**Repository**: {assessment.repository.name}\n"
    header += f"**Path**: `{assessment.repository.path}`\n"
    header += f"**Branch**: `{assessment.repository.branch}` | **Commit**: `{assessment.repository.commit_hash[:8]}`\n"

    # Assessment metadata (if available)
    if assessment.metadata:
        header += (
            f"**Assessed**: {assessment.metadata.assessment_timestamp_human}\n"
        )
        header += (
            f"**AgentReady Version**: {assessment.metadata.agentready_version}\n"
        )
        header += f"**Run by**: {assessment.metadata.executed_by}\n"
    else:
        # Fallback to timestamp if metadata not available
        header += (
            f"**Assessed**: {assessment.timestamp.strftime('%B %d, %Y at %H:%M')}\n"
        )

    header += "\n---"

    return header

def _generate_summary(self, assessment: Assessment) -> str:
"""Generate summary section with key metrics."""
Expand Down
27 changes: 24 additions & 3 deletions src/agentready/services/scanner.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Scanner service orchestrating the assessment workflow."""

import sys
import time
from datetime import datetime
from pathlib import Path
Expand All @@ -9,6 +10,7 @@
from ..models.assessment import Assessment
from ..models.config import Config
from ..models.finding import Finding
from ..models.metadata import AssessmentMetadata
from ..models.repository import Repository
from .language_detector import LanguageDetector
from .scorer import Scorer
Expand Down Expand Up @@ -63,12 +65,20 @@ def _validate_repository(self):
if not (self.repository_path / ".git").exists():
raise ValueError(f"Not a git repository: {self.repository_path}")

def scan(self, assessors: list, verbose: bool = False) -> Assessment:
def scan(
self,
assessors: list,
verbose: bool = False,
version: str = "unknown",
command: str | None = None,
) -> Assessment:
"""Execute full assessment workflow.

Args:
assessors: List of assessor instances to run
verbose: Enable detailed progress logging
version: AgentReady version string
command: CLI command executed (reconstructed from sys.argv if None)

Returns:
Complete Assessment with findings and scores
Expand All @@ -81,6 +91,7 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:
5. Return Assessment
"""
start_time = time.time()
timestamp = datetime.now()

if verbose:
print(f"Scanning repository: {self.repository_path.name}")
Expand All @@ -107,6 +118,15 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:

duration = time.time() - start_time

# Create metadata
if command is None:
# Reconstruct command from sys.argv
command = " ".join(sys.argv)

metadata = AssessmentMetadata.create(
version=version, timestamp=timestamp, command=command
)

if verbose:
print(f"\nAssessment complete in {duration:.1f}s")
print(f"Overall Score: {overall_score}/100 ({certification_level})")
Expand All @@ -116,7 +136,7 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:

return Assessment(
repository=repository,
timestamp=datetime.now(),
timestamp=timestamp,
overall_score=overall_score,
certification_level=certification_level,
attributes_assessed=assessed,
Expand All @@ -125,6 +145,7 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:
findings=findings,
config=self.config,
duration_seconds=round(duration, 1),
metadata=metadata,
)

def _build_repository_model(self, verbose: bool = False) -> Repository:
Expand Down Expand Up @@ -202,7 +223,7 @@ def _execute_assessor(
)
except Exception as e:
if verbose:
print(f"error (applicability check failed)")
print("error (applicability check failed)")
return Finding.error(
assessor.attribute, reason=f"Applicability check failed: {str(e)}"
)
Expand Down
Loading
Loading