9 changes: 7 additions & 2 deletions src/agentready/cli/extract_skills.py
@@ -49,7 +49,7 @@
 )
 @click.option(
     "--llm-budget",
-    type=int,
+    type=click.IntRange(min=1),
     default=5,
     help="Maximum number of skills to enrich with LLM (default: 5)",
 )
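
Reviewer note: with `click.IntRange(min=1)`, non-positive budgets are rejected at parse time instead of flowing into the service. A minimal sketch of that behavior, assuming standard Click semantics (not code from this PR):

```python
import click

@click.command()
@click.option("--llm-budget", type=click.IntRange(min=1), default=5)
def demo(llm_budget: int) -> None:
    click.echo(f"budget={llm_budget}")

# `demo --llm-budget 0` exits with a usage error along the lines of
# "Error: Invalid value for '--llm-budget': 0 is not in the range x>=1."
```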
@@ -153,10 +153,15 @@ def extract_skills(
         enable_llm = False
         click.echo()
 
+    # Resolve output directory relative to repository path if it's a relative path
+    output_dir_path = Path(output_dir)
+    if not output_dir_path.is_absolute():
+        output_dir_path = repo_path / output_dir
+
     # Create learning service
     learning_service = LearningService(
         min_confidence=min_confidence,
-        output_dir=output_dir,
+        output_dir=output_dir_path,
     )
 
     # Run learning workflow
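
For context, the new resolution logic behaves as in this standalone sketch (`repo_path` and `output_dir` are the PR's own names; the helper wrapper is illustrative):

```python
from pathlib import Path

def resolve_output_dir(repo_path: Path, output_dir: str) -> Path:
    """Anchor relative output paths at the repository root, not the CWD."""
    candidate = Path(output_dir)
    return candidate if candidate.is_absolute() else repo_path / candidate

assert resolve_output_dir(Path("/repos/demo"), ".skills-proposals") == Path(
    "/repos/demo/.skills-proposals"
)
assert resolve_output_dir(Path("/repos/demo"), "/var/out") == Path("/var/out")
```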
9 changes: 7 additions & 2 deletions src/agentready/cli/learn.py
@@ -49,7 +49,7 @@
 )
 @click.option(
     "--llm-budget",
-    type=int,
+    type=click.IntRange(min=1),
     default=5,
     help="Maximum number of skills to enrich with LLM (default: 5)",
 )
@@ -153,10 +153,15 @@ def learn(
         enable_llm = False
         click.echo()
 
+    # Resolve output directory relative to repository path if it's a relative path
+    output_dir_path = Path(output_dir)
+    if not output_dir_path.is_absolute():
+        output_dir_path = repo_path / output_dir
+
     # Create learning service
     learning_service = LearningService(
         min_confidence=min_confidence,
-        output_dir=output_dir,
+        output_dir=output_dir_path,
     )
 
     # Run learning workflow
2 changes: 1 addition & 1 deletion src/agentready/learners/code_sampler.py
@@ -104,7 +104,7 @@ def _format_code_samples(self, files: list) -> str:
         samples = []
 
         for file_item in files[: self.max_files]:
-            if isinstance(file_item, dict):
+            if isinstance(file_item, dict) and "path" in file_item:
                 # Directory tree
                 samples.append(f"## Directory Structure: {file_item['path']}\n")
                 samples.append(self._format_tree(file_item))
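
The extra `"path" in file_item` check keeps a malformed dict entry from raising a KeyError; a quick illustration with hypothetical data shapes (the real entries come from the sampler's file walk):

```python
files = [{"path": "src", "children": []}, {"name": "orphan"}, "README.md"]

for file_item in files:
    if isinstance(file_item, dict) and "path" in file_item:
        print(f"tree: {file_item['path']}")  # only well-formed directory-tree entries
    else:
        print(f"skipped or handled elsewhere: {file_item!r}")
```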
5 changes: 2 additions & 3 deletions src/agentready/learners/pattern_extractor.py
@@ -93,9 +93,8 @@ def extract_specific_patterns(
         discovered_skills = []
 
         for finding in self.assessment.findings:
-            if (
-                finding.attribute.attribute_id in attribute_ids
-                and self._should_extract_pattern(finding)
+            if finding.attribute.id in attribute_ids and self._should_extract_pattern(
+                finding
             ):
                 skill = self._create_skill_from_finding(finding)
                 if skill:
10 changes: 5 additions & 5 deletions src/agentready/services/research_formatter.py
@@ -276,13 +276,12 @@ def format_report(self, content: str) -> str:
         lines = [line.rstrip() for line in lines]
         content = "\n".join(lines)
 
-        # Ensure file ends with single newline
-        if not content.endswith("\n"):
-            content += "\n"
-
         # Remove multiple blank lines (max 2 consecutive blank lines)
         content = re.sub(r"\n{4,}", "\n\n\n", content)
 
+        # Ensure file ends with exactly one newline
+        content = content.rstrip("\n") + "\n"
+
         return content
 
@@ -294,7 +293,8 @@ def extract_attribute_ids(self, content: str) -> list[str]:
         Returns:
             List of attribute IDs (e.g., ["1.1", "1.2", "2.1", ...])
         """
-        pattern = r"^###\s+(\d+\.\d+)\s+"
+        # Extract both valid and potentially malformed IDs for validation
+        pattern = r"^###\s+([\d]+\.[\w]+)\s+"
         matches = re.findall(pattern, content, re.MULTILINE)
         return matches
 
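
Two behavior notes on this file, shown as a small self-contained check (sample strings are illustrative, not from the project). Reordering the cleanup means a report can no longer end in several newlines, since the old code appended the final newline before collapsing blank runs; and the widened regex now also captures malformed attribute IDs such as `3.x` so later validation can flag them:

```python
import re

# Trailing-newline fix: collapse blank runs first, then force exactly one newline.
content = "Title\n\n\n\n\nBody\n\n\n"
content = re.sub(r"\n{4,}", "\n\n\n", content)  # max 2 consecutive blank lines
content = content.rstrip("\n") + "\n"           # exactly one trailing newline
assert content == "Title\n\n\nBody\n"

# ID extraction: the new pattern also matches malformed IDs for validation.
text = "### 1.1 Title\n### 2.10 Other\n### 3.x Malformed\n"
assert re.findall(r"^###\s+(\d+\.\d+)\s+", text, re.MULTILINE) == ["1.1", "2.10"]
assert re.findall(r"^###\s+([\d]+\.[\w]+)\s+", text, re.MULTILINE) == [
    "1.1",
    "2.10",
    "3.x",
]
```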
14 changes: 12 additions & 2 deletions tests/unit/learners/test_llm_enricher.py
@@ -292,7 +292,12 @@ def test_enrich_skill_rate_limit_retry(
         client = Mock(spec=Anthropic)
 
         # First call raises rate limit, second succeeds
-        rate_limit_error = RateLimitError("Rate limit")
+        # Mock response and body for RateLimitError
+        mock_response = Mock()
+        mock_response.status_code = 429
+        rate_limit_error = RateLimitError(
+            "Rate limit", response=mock_response, body={"error": "rate_limit"}
+        )
         rate_limit_error.retry_after = 1  # 1 second retry
 
         success_response = Mock()
@@ -322,7 +327,12 @@ def test_enrich_skill_api_error_specific(
         from anthropic import APIError
 
         client = Mock(spec=Anthropic)
-        client.messages.create.side_effect = APIError("API Error")
+        # Mock request for APIError
+        mock_request = Mock()
+        mock_request.method = "POST"
+        client.messages.create.side_effect = APIError(
+            "API Error", request=mock_request, body={"error": "api_error"}
+        )
 
         cache_dir = tmp_path / "cache"
         enricher = LLMEnricher(client, cache_dir=cache_dir)
35 changes: 29 additions & 6 deletions tests/unit/learners/test_pattern_extractor.py
@@ -9,6 +9,29 @@
 from agentready.models import Assessment, Attribute, Finding, Repository
 
 
+def create_dummy_finding() -> Finding:
+    """Create a dummy finding for testing (not_applicable status)."""
+    attr = Attribute(
+        id="test_attr",
+        name="Test Attribute",
+        category="Testing",
+        tier=1,
+        description="Test attribute",
+        criteria="Test criteria",
+        default_weight=1.0,
+    )
+    return Finding(
+        attribute=attr,
+        status="not_applicable",
+        score=None,
+        measured_value=None,
+        threshold=None,
+        evidence=[],
+        remediation=None,
+        error_message=None,
+    )
+
+
 def create_test_repository(tmp_path=None):
     """Create a test repository with valid path."""
     if tmp_path is None:
@@ -333,7 +356,7 @@ def test_should_extract_pattern_logic(self, sample_finding_high_score):
             attributes_assessed=1,
             attributes_not_assessed=0,
             attributes_total=1,
-            findings=[],
+            findings=[create_dummy_finding()],
             config=None,
             duration_seconds=1.0,
         )
@@ -395,7 +418,7 @@ def test_create_skill_from_finding(self, sample_finding_high_score):
             attributes_assessed=1,
             attributes_not_assessed=0,
             attributes_total=1,
-            findings=[],
+            findings=[create_dummy_finding()],
             config=None,
             duration_seconds=1.0,
         )
@@ -513,7 +536,7 @@ def test_extract_code_examples_from_evidence(self, sample_finding_high_score):
             attributes_assessed=1,
             attributes_not_assessed=0,
             attributes_total=1,
-            findings=[],
+            findings=[create_dummy_finding()],
             config=None,
             duration_seconds=1.0,
         )
@@ -554,7 +577,7 @@ def test_extract_code_examples_limits_to_three(self, sample_repository):
             attributes_assessed=1,
             attributes_not_assessed=0,
             attributes_total=1,
-            findings=[],
+            findings=[create_dummy_finding()],
             config=None,
             duration_seconds=1.0,
         )
@@ -574,7 +597,7 @@ def test_create_pattern_summary(self, sample_finding_high_score):
             attributes_assessed=1,
             attributes_not_assessed=0,
             attributes_total=1,
-            findings=[],
+            findings=[create_dummy_finding()],
             config=None,
             duration_seconds=1.0,
         )
@@ -615,7 +638,7 @@ def test_pattern_summary_fallback_to_evidence(self, sample_repository):
             attributes_assessed=1,
             attributes_not_assessed=0,
             attributes_total=1,
-            findings=[],
+            findings=[create_dummy_finding()],
             config=None,
             duration_seconds=1.0,
         )
2 changes: 1 addition & 1 deletion tests/unit/learners/test_skill_generator.py
@@ -128,7 +128,7 @@ def test_generate_markdown_report(self, sample_skill, tmp_path):
         content = output_file.read_text()
         assert "Test Skill" in content
         assert "test-skill" in content
-        assert "90%" in content  # Confidence
+        assert "90.0%" in content  # Confidence
         assert "+50.0 pts" in content  # Impact
         assert "85.0%" in content  # Reusability
 
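
For reference, the new expectation matches Python's one-decimal percent formatting; assuming the generator uses a format spec like `:.1%` (a guess, the diff does not show the generator code):

```python
confidence = 0.9
print(f"{confidence:.1%}")  # 90.0%  (matches the updated assertion)
print(f"{confidence:.0%}")  # 90%    (the old zero-decimal rendering)
```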
8 changes: 4 additions & 4 deletions tests/unit/test_cli_extract_skills.py
@@ -84,9 +84,9 @@ def test_extract_skills_command_skill_md_output(self, runner, temp_repo):

         assert result.exit_code == 0
 
-        # Check for SKILL.md files
+        # Check for SKILL.md files (in subdirectories: skill-id/SKILL.md)
         output_dir = temp_repo / ".skills-proposals"
-        md_files = list(output_dir.glob("*.md"))
+        md_files = list(output_dir.glob("*/SKILL.md"))
         assert len(md_files) > 0
 
     def test_extract_skills_command_github_issues_output(self, runner, temp_repo):
@@ -98,9 +98,9 @@ def test_extract_skills_command_github_issues_output(self, runner, temp_repo):

         assert result.exit_code == 0
 
-        # Check for issue files
+        # Check for issue files (named skill-{id}.md)
         output_dir = temp_repo / ".skills-proposals"
-        issue_files = list(output_dir.glob("issue-*.md"))
+        issue_files = list(output_dir.glob("skill-*.md"))
         assert len(issue_files) > 0
 
     def test_extract_skills_command_all_output_formats(self, runner, temp_repo):
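
The new glob expressions mirror the changed output layout (one directory per skill, plus `skill-*.md` issue files at the top level); a quick pathlib illustration with a made-up tree:

```python
from pathlib import Path
import tempfile

root = Path(tempfile.mkdtemp())
(root / "use-type-hints").mkdir()
(root / "use-type-hints" / "SKILL.md").write_text("# skill\n")
(root / "skill-use-type-hints.md").write_text("# issue\n")

print(sorted(root.glob("*/SKILL.md")))  # nested SKILL.md files, one per skill
print(sorted(root.glob("skill-*.md")))  # top-level GitHub-issue drafts
```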
33 changes: 28 additions & 5 deletions tests/unit/test_cli_learn.py
@@ -25,13 +25,36 @@
     agentready_dir = repo_path / ".agentready"
     agentready_dir.mkdir()
 
-    # Create sample assessment using shared fixture
+    # Create sample assessment with known skill IDs that PatternExtractor recognizes
+    from tests.fixtures.assessment_fixtures import create_test_finding_json
+
+    findings = [
+        create_test_finding_json(
+            attribute_id="claude_md_file",
+            attribute_name="CLAUDE.md File",
+            status="pass",
+            score=95.0,
+            category="Documentation",
+            tier=1,
+        ),
+        create_test_finding_json(
+            attribute_id="type_annotations",
+            attribute_name="Type Annotations",
+            status="pass",
+            score=90.0,
+            category="Code Quality",
+            tier=2,
+        ),
+    ]
+
     assessment_data = create_test_assessment_json(
         overall_score=85.0,
         num_findings=2,
         repo_path=str(repo_path),
         repo_name="test-repo",
     )
+    # Replace generic findings with skill-specific ones
+    assessment_data["findings"] = findings
 
     assessment_file = agentready_dir / "assessment-latest.json"
     with open(assessment_file, "w") as f:
@@ -82,9 +105,9 @@ def test_learn_command_skill_md_output(self, runner, temp_repo):

         assert result.exit_code == 0
 
-        # Check for SKILL.md files
+        # Check for SKILL.md files (in subdirectories: skill-id/SKILL.md)
        output_dir = temp_repo / ".skills-proposals"
-        md_files = list(output_dir.glob("*.md"))
+        md_files = list(output_dir.glob("*/SKILL.md"))
         assert len(md_files) > 0
 
     def test_learn_command_github_issues_output(self, runner, temp_repo):
@@ -96,9 +119,9 @@ def test_learn_command_github_issues_output(self, runner, temp_repo):

         assert result.exit_code == 0
 
-        # Check for issue files
+        # Check for issue files (named skill-{id}.md)
         output_dir = temp_repo / ".skills-proposals"
-        issue_files = list(output_dir.glob("issue-*.md"))
+        issue_files = list(output_dir.glob("skill-*.md"))
         assert len(issue_files) > 0
 
     def test_learn_command_all_output_formats(self, runner, temp_repo):
53 changes: 39 additions & 14 deletions tests/unit/test_csv_reporter.py
@@ -6,15 +6,44 @@
 import pytest
 
 from src.agentready.models.assessment import Assessment
+from src.agentready.models.attribute import Attribute
 from src.agentready.models.batch_assessment import (
     BatchAssessment,
     BatchSummary,
     RepositoryResult,
 )
+from src.agentready.models.finding import Finding
 from src.agentready.models.repository import Repository
 from src.agentready.reporters.csv_reporter import CSVReporter
 
 
+def create_dummy_findings(count: int) -> list[Finding]:
+    """Create dummy findings for testing."""
+    findings = []
+    for i in range(count):
+        attr = Attribute(
+            id=f"test_attr_{i}",
+            name=f"Test Attribute {i}",
+            category="Testing",
+            tier=1,
+            description="Test attribute",
+            criteria="Test criteria",
+            default_weight=1.0,
+        )
+        finding = Finding(
+            attribute=attr,
+            status="not_applicable",
+            score=None,
+            measured_value=None,
+            threshold=None,
+            evidence=[],
+            remediation=None,
+            error_message=None,
+        )
+        findings.append(finding)
+    return findings
+
+
 @pytest.fixture
 def temp_csv_file(tmp_path):
     """Create temporary CSV file for testing."""
@@ -104,7 +133,7 @@ def mock_batch_assessment(mock_assessment, tmp_path):
         attributes_assessed=20,
         attributes_not_assessed=5,
         attributes_total=25,
-        findings=[],
+        findings=create_dummy_findings(25),
         config=None,
         duration_seconds=38.0,
         discovered_skills=[],
@@ -293,20 +322,16 @@ def test_csv_empty_batch(self, tmp_path):
             failed_assessments=0,
             average_score=0.0,
         )
-        batch = BatchAssessment(
-            batch_id="empty-batch",
-            timestamp=datetime.now(),
-            results=[],
-            summary=summary,
-            total_duration_seconds=0.0,
-        )
 
-        reporter = CSVReporter()
-        output_path = tmp_path / "empty.csv"
-
-        # Should not raise error, but will only have header
-        reporter.generate(batch, output_path)
+        # BatchAssessment validation should raise ValueError during construction
+        with pytest.raises(ValueError, match="Batch must have at least one result"):
+            BatchAssessment(
+                batch_id="empty-batch",
+                timestamp=datetime.now(),
+                results=[],
+                summary=summary,
+                total_duration_seconds=0.0,
+            )
 
     def test_csv_creates_parent_directory(self, tmp_path):
         """Test that CSV reporter creates parent directories if needed."""
@@ -336,7 +361,7 @@ def test_csv_creates_parent_directory(self, tmp_path):
             attributes_assessed=1,
             attributes_not_assessed=0,
             attributes_total=1,
-            findings=[],
+            findings=create_dummy_findings(1),
             config=None,
             duration_seconds=1.0,
             discovered_skills=[],
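
The rewritten empty-batch test assumes `BatchAssessment` now validates its results at construction time. A minimal sketch of that kind of guard (hypothetical stand-in class, not the project's actual model):

```python
from dataclasses import dataclass, field


@dataclass
class TinyBatch:
    """Hypothetical stand-in for a model that rejects empty batches."""

    batch_id: str
    results: list = field(default_factory=list)

    def __post_init__(self) -> None:
        if not self.results:
            raise ValueError("Batch must have at least one result")
```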