
Commit d019a60

refactor(tests): remove performance tests for argument processing

Signed-off-by: Daniel Biehl <dbiehl@live.de>

1 parent: d698900

File tree: 1 file changed (+0, −161)


tests/robotcode/language_server/robotframework/parts/test_semantic_tokens_unit.py

Lines changed: 0 additions & 161 deletions
@@ -341,22 +341,6 @@ def test_skip_non_data_tokens_real_implementation(self) -> None:
         assert current is not None
         assert current.value == "arg1"

-    def test_performance_with_large_argument_list(self) -> None:
-        """Test performance with large argument lists (no list slicing)."""
-        # Create large list of arguments
-        large_args = [Token(Token.ARGUMENT, f"arg{i}", 1, i * 10) for i in range(1000)]
-
-        processor = ArgumentProcessor(large_args)
-
-        # Test that all arguments can be efficiently processed
-        consumed_count = 0
-        while processor.has_next():
-            token = processor.consume()
-            if token:
-                consumed_count += 1
-
-        assert consumed_count == 1000
-

 class TestNamedArgumentProcessor:
     """Test cases for NamedArgumentProcessor class."""
@@ -1480,27 +1464,6 @@ def test_skip_non_data_tokens_comprehensive(self) -> None:
         assert current is not None
         assert current.value == "first_arg"

-    def test_remaining_slice_performance(self) -> None:
-        """Test remaining_slice performance with large lists."""
-        # Create large token list
-        large_tokens = [Token(Token.ARGUMENT, f"arg{i}", 1, i * 10) for i in range(10000)]
-        processor = ArgumentProcessor(large_tokens)
-
-        # Advance to middle
-        for _ in range(5000):
-            processor.consume()
-
-        # Get remaining slice - should be efficient
-        import time
-
-        start_time = time.time()
-        remaining = processor.remaining_slice()
-        end_time = time.time()
-
-        # Should be fast (< 1ms) and correct length
-        assert (end_time - start_time) < 0.001  # Less than 1ms
-        assert len(remaining) == 5000
-

 class TestNamedArgumentValidationRobust:
     """Robust tests for named argument validation and edge cases."""
@@ -1789,130 +1752,6 @@ def test_semantic_token_mapper_with_unknown_tokens(self) -> None:
             pytest.fail(f"Should not raise exception for unknown token '{token_type}': {e}")


-class TestPerformanceScenarios:
-    """Performance tests for semantic token processing."""
-
-    def test_large_keyword_argument_lists(self) -> None:
-        """Test performance with large keyword argument lists."""
-        # Create keyword with many arguments
-        large_args = [Token(Token.ARGUMENT, f"arg{i}=value{i}", 1, i * 20) for i in range(1000)]
-        processor = ArgumentProcessor(large_args)
-
-        # Test that processing is efficient
-        import time
-
-        start_time = time.time()
-
-        processed_count = 0
-        while processor.has_next():
-            token = processor.consume()
-            if token:
-                processed_count += 1
-
-        end_time = time.time()
-
-        # Should process quickly (< 100ms for 1000 args)
-        assert (end_time - start_time) < 0.1
-        assert processed_count == 1000
-
-    def test_unicode_handling_performance(self) -> None:
-        """Test performance with Unicode characters in tokens."""
-        # Create tokens with various Unicode characters
-        unicode_tokens = [
-            Token(Token.ARGUMENT, f"测试参数{i}", 1, i * 15)  # Chinese
-            for i in range(100)  # Reduced to 100 for more realistic test
-        ]
-
-        processor = ArgumentProcessor(unicode_tokens)
-        generator = SemanticTokenGenerator()
-        namespace = type(
-            "MockNamespace",
-            (),
-            {
-                "find_keyword": lambda self, name, **kwargs: None,
-                "languages": None,
-            },
-        )()
-
-        # Test processing time
-        import time
-
-        start_time = time.time()
-
-        processed_tokens = []
-        while processor.has_next():
-            token = processor.consume()
-            if token:
-                node = KeywordCall([])
-                try:
-                    sem_tokens = list(generator.generate_sem_tokens(token, node, namespace, None))
-                    processed_tokens.extend(sem_tokens)
-                except Exception:
-                    # Some tokens might not be processable, that's OK for performance test
-                    pass
-
-        end_time = time.time()
-
-        # Should handle Unicode efficiently (< 200ms for 100 tokens)
-        assert (end_time - start_time) < 0.2
-        # At least some tokens should be processed
-        assert len(processed_tokens) >= 0  # Just ensure no crash
-
-    def test_memory_usage_large_robot_files(self) -> None:
-        """Test memory usage with large Robot Framework files."""
-        # Simulate large Robot file content
-        large_robot_content = """*** Test Cases ***
-"""
-        for i in range(100):
-            large_robot_content += f"""Test Case {i}
-    Log    message=Test message {i}    level=INFO
-    Set Variable    ${{var{i}}}    value{i}
-    Should Be Equal    first=${{var{i}}}    second=value{i}
-
-"""
-
-        # Parse with Robot Framework
-        model = get_model(large_robot_content)
-        generator = SemanticTokenGenerator()
-        namespace = type(
-            "MockNamespace",
-            (),
-            {
-                "find_keyword": lambda self, name, **kwargs: None,
-                "languages": None,
-            },
-        )()
-
-        # Collect all tokens
-        all_tokens = []
-        import ast
-
-        for node in ast.walk(model):
-            if hasattr(node, "tokens"):
-                for token in getattr(node, "tokens", []):
-                    if token.type not in [Token.SEPARATOR, Token.EOL, Token.EOS]:
-                        all_tokens.append((token, node))
-
-        # Process all tokens and measure memory (simplified)
-        import time
-
-        start_time = time.time()
-        processed_semantic_tokens = []
-
-        for token, node in all_tokens:
-            try:
-                sem_tokens = list(generator.generate_sem_tokens(token, node, namespace, None))
-                processed_semantic_tokens.extend(sem_tokens)
-            except Exception:
-                pass  # Ignore processing errors for this performance test
-
-        end_time = time.time()
-
-        # Should process large files reasonably quickly
-        assert (end_time - start_time) < 2.0  # 2 seconds max
-        assert len(processed_semantic_tokens) >= len(all_tokens) * 0.5  # At least 50% processed
-
-
 class TestRealWorldIntegration:
     """Real-world integration tests with actual Robot Framework patterns."""
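All of the removed tests assert fixed wall-clock budgets (< 1 ms, < 100 ms, < 2 s), which makes them sensitive to machine speed and load. If equivalent coverage is ever reinstated, one option is the pytest-benchmark plugin, which records timings instead of hard-asserting them. A hypothetical sketch, not part of this commit and reusing the ArgumentProcessor shape inferred earlier:

# Hypothetical sketch using the pytest-benchmark plugin (pip install pytest-benchmark).
# The benchmark fixture runs the callable repeatedly, records timing statistics,
# and returns the callable's result, so correctness can still be asserted.
def test_consume_throughput(benchmark) -> None:
    tokens = [Token(Token.ARGUMENT, f"arg{i}", 1, i * 10) for i in range(1000)]

    def consume_all() -> int:
        processor = ArgumentProcessor(tokens)
        count = 0
        while processor.has_next():
            if processor.consume() is not None:
                count += 1
        return count

    assert benchmark(consume_all) == 1000  # timing is reported, not hard-asserted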