-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_core_metrics.py
90 lines (76 loc) · 2.83 KB
/
test_core_metrics.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
"""
Core test script for cognitive metrics.
This script provides a minimal test environment for our metrics,
following British English standards and proper type safety.
"""
import spacy
from pathlib import Path
import sys
def main():
    """Test core NLP functionality.

    Loads the small English spaCy pipeline, runs a reasoning-pattern
    keyword scan over a fixed sample passage, and exercises sentence
    segmentation and dependency parsing. Returns 0 on success, 1 on
    any failure (with a traceback printed for diagnosis).
    """
    print("Testing Core NLP Components")
    print("==========================")
    try:
        # Load the small English pipeline; fails if the model
        # package is not installed.
        print("\nLoading NLP model...")
        nlp = spacy.load("en_core_web_sm")
        print("✓ Model loaded successfully")

        # Sample passage seeded with reasoning markers for the scan below.
        sample = """
        First, let's examine the evidence carefully. According to recent studies,
        the approach shows promising results in 75% of cases. However, we must
        consider some limitations. For example, the sample size was relatively
        small. Therefore, while the data suggests positive outcomes, further
        validation would be beneficial.
        """

        print("\nProcessing test text...")
        parsed = nlp(sample)
        print("✓ Text processed successfully")

        # Marker terms grouped by the rhetorical role they signal.
        pattern_groups = {
            "Logical Steps": [
                "first", "therefore", "while"
            ],
            "Evidence": [
                "according to", "studies", "data suggests"
            ],
            "Counterarguments": [
                "however", "limitations"
            ]
        }

        print("\nAnalysing patterns:")
        # Hoist the case-folded text out of the loops; matching is a
        # plain case-insensitive substring check, not token-based.
        lowered_sample = sample.lower()
        for label, markers in pattern_groups.items():
            hit_count = 0
            print(f"\n{label}:")
            for marker in markers:
                if marker.lower() not in lowered_sample:
                    continue
                hit_count += 1
                print(f"✓ Found: {marker}")
                # Show every sentence that contains the marker.
                for sentence in parsed.sents:
                    if marker.lower() in sentence.text.lower():
                        print(f"  Context: {sentence.text.strip()}")
            if hit_count == 0:
                print(f"✗ No matches found for {label}")

        # Sentence segmentation sanity check.
        print("\nTesting sentence segmentation:")
        sentence_list = list(parsed.sents)
        print(f"✓ Found {len(sentence_list)} sentences")

        # Dependency parsing spot-check on the first five tokens.
        print("\nTesting dependency parsing:")
        for tok in parsed[:5]:
            print(f"Token: {tok.text}")
            print(f"Dependency: {tok.dep_}")
            print(f"Head: {tok.head.text}")
            print()

        print("\n✓ All core tests completed successfully!")
    except Exception as err:
        # Broad catch is deliberate: this is the script's top-level
        # boundary, and the full traceback is surfaced for debugging.
        print(f"\n✗ Error: {err}")
        import traceback
        print("\nTraceback:")
        traceback.print_exc()
        return 1
    return 0
if __name__ == "__main__":
    # SystemExit with main()'s return code — equivalent to sys.exit(main()).
    raise SystemExit(main())