9 changes: 9 additions & 0 deletions CLAUDE.md
@@ -47,6 +47,15 @@ Message Template Parser → Enrichment → Filtering → Capturing → Sinks (Output)
- Environment variable support: `MTLOG_SELFLOG=stderr/stdout/file`
- Reports sink failures, template errors, panic recovery, and configuration issues

### 7. Template Cache (v0.8.1+)
- **LRU cache** for parsed message templates to avoid repeated allocations
- **Bounded size** (default: 10,000) to prevent memory exhaustion from dynamic templates
- **Sharded design** with up to 64 shards for concurrent access (see the sketch after this list)
- **O(1) operations** with proper LRU eviction
- **Optional TTL** support for time-based expiration
- **Thread-safe** with atomic statistics tracking
- **Security fix** for issue #39 - prevents DoS via unbounded template generation
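
A minimal sketch of what a sharded LRU cache of this shape can look like is below. It is illustrative only: the type and function names (`shardedCache`, `newShard`, `shardFor`) and the plain `string` payload standing in for a parsed template are assumptions, not mtlog's actual internals, which additionally track atomic hit/miss/eviction counters and optional TTL.

```go
package main

import (
    "container/list"
    "fmt"
    "hash/fnv"
    "sync"
)

// entry pairs a template key with its cached value inside the LRU list.
type entry struct {
    key   string
    value string // stands in for a parsed template
}

// shard is one independently locked LRU segment of the cache.
type shard struct {
    mu       sync.Mutex
    capacity int
    items    map[string]*list.Element // key -> node in the order list
    order    *list.List               // front = most recently used
}

func newShard(capacity int) *shard {
    return &shard{
        capacity: capacity,
        items:    make(map[string]*list.Element),
        order:    list.New(),
    }
}

// get returns a cached value and promotes it to most recently used.
func (s *shard) get(key string) (string, bool) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if el, ok := s.items[key]; ok {
        s.order.MoveToFront(el)
        return el.Value.(*entry).value, true
    }
    return "", false
}

// put inserts a value, evicting the least recently used entry when full.
func (s *shard) put(key, value string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if el, ok := s.items[key]; ok {
        el.Value.(*entry).value = value
        s.order.MoveToFront(el)
        return
    }
    if s.order.Len() >= s.capacity {
        oldest := s.order.Back()
        s.order.Remove(oldest)
        delete(s.items, oldest.Value.(*entry).key)
    }
    s.items[key] = s.order.PushFront(&entry{key: key, value: value})
}

// shardedCache spreads keys across shards so concurrent loggers rarely contend.
type shardedCache struct {
    shards []*shard
}

func newShardedCache(numShards, totalCapacity int) *shardedCache {
    c := &shardedCache{shards: make([]*shard, numShards)}
    for i := range c.shards {
        c.shards[i] = newShard(totalCapacity / numShards)
    }
    return c
}

// shardFor hashes the key to pick a shard, keeping every operation O(1).
func (c *shardedCache) shardFor(key string) *shard {
    h := fnv.New32a()
    h.Write([]byte(key))
    return c.shards[h.Sum32()%uint32(len(c.shards))]
}

func main() {
    cache := newShardedCache(4, 8) // 4 shards, 2 entries each
    for i := 0; i < 20; i++ {
        key := fmt.Sprintf("User %d: {Action}", i)
        s := cache.shardFor(key)
        if _, ok := s.get(key); !ok {
            s.put(key, "parsed-template")
        }
    }
    fmt.Println("cache stayed bounded despite 20 unique templates")
}
```

Because each shard owns its own lock and keys are spread by hash, goroutines logging different templates rarely contend, while each shard's map-plus-list pair keeps lookups and evictions O(1).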

## Development Commands

24 changes: 24 additions & 0 deletions README.md
@@ -420,6 +420,29 @@ acmeName := mtlog.ExtractTypeNameWithCacheKey[User](opts, "tenant:acme")

### Cache Configuration

#### Template Cache

The message template parser uses an LRU cache to avoid repeated parsing of templates. This cache is bounded to prevent memory exhaustion from dynamic template generation:

```go
import "github.com/willibrandon/mtlog/internal/parser"

// Configure template cache at application startup
parser.ConfigureCache(
parser.WithMaxSize(50_000), // Maximum number of cached templates (default: 10,000)
parser.WithTTL(5 * time.Minute), // Optional: expire entries after 5 minutes
)

// Monitor cache performance
stats := parser.GetCacheStats()
fmt.Printf("Template cache: hits=%d, misses=%d, evictions=%d, size=%d/%d\n",
stats.Hits, stats.Misses, stats.Evictions, stats.Size, stats.MaxSize)
```

The template cache protects against memory exhaustion from dynamic template generation (e.g., `fmt.Sprintf("User %d: {Action}", id)`) by enforcing strict size limits with LRU eviction.
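
To make that concrete, here is a hedged sketch contrasting the pattern the cache defends against with the pattern that avoids the problem entirely (`logger` and `userID` are placeholders from the surrounding application, not new API):

```go
// Risky: the template string itself changes on every call, so each call yields
// a unique template. The bounded LRU keeps memory in check, but every call
// still pays the cost of a fresh parse.
logger.Information(fmt.Sprintf("User %d: {Action}", userID), "login")

// Preferred: keep the template constant and pass variable data as properties,
// so a single cached parse serves every call.
logger.Information("User {UserId}: {Action}", userID, "login")
```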

#### Type Name Cache

The type name cache can be configured via environment variables:

@@ -712,6 +735,7 @@ See the [examples](./examples) directory for complete examples:
- [Durable buffering](./examples/durable/main.go)
- [Dynamic levels](./examples/dynamic-levels/main.go)
- [Configuration](./examples/configuration/main.go)
- [Template cache](./examples/template-cache/main.go)
- [Generics usage](./examples/generics/main.go)

## Ecosystem Compatibility
65 changes: 65 additions & 0 deletions examples/template-cache/main.go
@@ -0,0 +1,65 @@
package main

import (
    "fmt"
    "time"

    "github.com/willibrandon/mtlog"
    "github.com/willibrandon/mtlog/core"
    "github.com/willibrandon/mtlog/internal/parser"
)

func main() {
    // Configure the template cache at application startup
    // This should be done once, before any logging
    parser.ConfigureCache(
        parser.WithMaxSize(5000),       // Limit cache to 5000 templates
        parser.WithTTL(10*time.Minute), // Expire entries after 10 minutes
    )

    // Create logger
    logger := mtlog.New(
        mtlog.WithConsole(),
        mtlog.WithMinimumLevel(core.InformationLevel),
    )

    // Normal logging - templates are automatically cached
    for i := 0; i < 100; i++ {
        // These templates will be cached after first parse
        logger.Information("User {UserId} logged in", i)
        logger.Information("Order {OrderId} processed", i*100)
    }

    // Check cache statistics
    stats := parser.GetCacheStats()
    fmt.Printf("\n=== Template Cache Statistics ===\n")
    fmt.Printf("Cache hits: %d\n", stats.Hits)
    fmt.Printf("Cache misses: %d\n", stats.Misses)
    fmt.Printf("Evictions: %d\n", stats.Evictions)
    fmt.Printf("Current size: %d/%d\n", stats.Size, stats.MaxSize)

    if stats.Hits+stats.Misses > 0 {
        hitRate := float64(stats.Hits) / float64(stats.Hits+stats.Misses) * 100
        fmt.Printf("Hit rate: %.1f%%\n", hitRate)
    }

    // Demonstrate protection against dynamic template generation
    fmt.Println("\n=== Dynamic Template Protection Demo ===")

    // This would be dangerous without cache limits - could exhaust memory
    for i := 0; i < 10000; i++ {
        // Each iteration creates a unique template
        dynamicTemplate := fmt.Sprintf("Iteration %d: {RequestId} completed", i)
        logger.Information(dynamicTemplate, fmt.Sprintf("req-%d", i))
    }

    // Check stats again - should show evictions
    stats = parser.GetCacheStats()
    fmt.Printf("\nAfter dynamic templates:\n")
    fmt.Printf("Cache size: %d (still bounded at max)\n", stats.Size)
    fmt.Printf("Evictions: %d (old templates evicted)\n", stats.Evictions)

    // The cache protects against memory exhaustion
    // Even with 10,000 unique templates, memory usage is bounded
    fmt.Println("\n✓ Cache successfully prevented memory exhaustion")
}
41 changes: 19 additions & 22 deletions internal/parser/cache.go
@@ -1,26 +1,14 @@
 package parser

-import (
-    "sync"
-)
-
-// templateCache is a thread-safe cache for parsed templates.
-var templateCache = &struct {
-    sync.RWMutex
-    templates map[string]*MessageTemplate
-}{
-    templates: make(map[string]*MessageTemplate),
-}
-
 // ParseCached parses a template with caching to avoid repeated allocations.
+// It uses the global LRU cache with bounded size to prevent memory exhaustion.
 func ParseCached(template string) (*MessageTemplate, error) {
+    cache := GetGlobalCache()
+
     // Check cache first
-    templateCache.RLock()
-    if cached, ok := templateCache.templates[template]; ok {
-        templateCache.RUnlock()
+    if cached, ok := cache.Get(template); ok {
         return cached, nil
     }
-    templateCache.RUnlock()

     // Parse if not cached
     parsed, err := Parse(template)
@@ -29,13 +17,17 @@ func ParseCached(template string) (*MessageTemplate, error) {
     }

     // Store in cache
-    templateCache.Lock()
-    templateCache.templates[template] = parsed
-    templateCache.Unlock()
+    cache.Put(template, parsed)

     return parsed, nil
 }

+// ConfigureCache is a convenience function to configure the global cache
+// This should be called at application startup before any cache usage
+func ConfigureCache(opts ...CacheOption) {
+    ConfigureGlobalCache(opts...)
+}
+
 // ExtractPropertyNamesFromTemplate extracts property names from an already parsed template.
 func ExtractPropertyNamesFromTemplate(tmpl *MessageTemplate) []string {
     names := make([]string, 0, len(tmpl.Tokens)/2) // Pre-allocate with reasonable capacity
@@ -55,7 +47,12 @@ func ExtractPropertyNamesFromTemplate(tmpl *MessageTemplate) []string {

 // ClearCache clears the template cache (useful for tests).
 func ClearCache() {
-    templateCache.Lock()
-    templateCache.templates = make(map[string]*MessageTemplate)
-    templateCache.Unlock()
+    cache := GetGlobalCache()
+    cache.Clear()
 }
+
+// GetCacheStats returns global cache statistics
+func GetCacheStats() Stats {
+    cache := GetGlobalCache()
+    return cache.Stats()
+}
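
For reference, the `Stats` value consumed in the README and example above appears to expose at least the following fields; the field types here are assumptions based on how they are printed, not a verbatim copy of the library's definition:

```go
// Stats mirrors the counters the cache reports; field types are assumed.
type Stats struct {
    Hits      uint64 // lookups served from the cache
    Misses    uint64 // lookups that required a fresh parse
    Evictions uint64 // entries dropped to stay within MaxSize
    Size      int    // templates currently cached
    MaxSize   int    // configured upper bound
}
```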
144 changes: 144 additions & 0 deletions internal/parser/cache_security_test.go
@@ -0,0 +1,144 @@
package parser

import (
    "fmt"
    "runtime"
    "testing"
)

// TestCacheMemoryExhaustionProtection demonstrates that the cache is protected
// against the memory exhaustion vulnerability described in issue #39
func TestCacheMemoryExhaustionProtection(t *testing.T) {
    // Create a dedicated cache for this test to avoid interference
    cache := NewTemplateCache(WithMaxSize(100))
    defer cache.Close()

    // Get initial memory stats
    var m1 runtime.MemStats
    runtime.ReadMemStats(&m1)

    // Simulate the attack scenario from issue #39
    // Try to create 1 million unique templates
    for i := 0; i < 1_000_000; i++ {
        template := fmt.Sprintf("User %d: {Action}", i)

        // Check cache first
        if cached, ok := cache.Get(template); ok {
            _ = cached
            continue
        }

        // Parse if not cached
        parsed, err := Parse(template)
        if err != nil {
            t.Fatalf("Parse failed at iteration %d: %v", i, err)
        }

        // Store in cache
        cache.Put(template, parsed)
    }

    // Force garbage collection
    runtime.GC()
    runtime.GC()

    // Get final memory stats
    var m2 runtime.MemStats
    runtime.ReadMemStats(&m2)

    // Check cache stats
    stats := cache.Stats()

    // Cache size should be bounded at max size
    if stats.Size > 100 {
        t.Errorf("Cache size %d exceeds configured max of 100", stats.Size)
    }

    // Should have many evictions (approximately 999,900)
    if stats.Evictions < 999_000 {
        t.Errorf("Expected at least 999,000 evictions, got %d", stats.Evictions)
    }

    // Memory growth should be minimal (allowing for some overhead)
    memGrowth := m2.Alloc - m1.Alloc
    maxAllowedGrowth := uint64(10 * 1024 * 1024) // 10MB max growth

    if memGrowth > maxAllowedGrowth {
        t.Errorf("Memory grew by %d bytes, exceeding allowed %d bytes", memGrowth, maxAllowedGrowth)
    }

    t.Logf("Memory growth: %d bytes", memGrowth)
    t.Logf("Cache stats: Size=%d, Evictions=%d, Hits=%d, Misses=%d",
        stats.Size, stats.Evictions, stats.Hits, stats.Misses)
}

// TestCacheWithDynamicTemplates tests the specific scenario mentioned in the issue
func TestCacheWithDynamicTemplates(t *testing.T) {
    // Create a dedicated cache for this test
    cache := NewTemplateCache(WithMaxSize(50))
    defer cache.Close()

    // Simulate dynamic template generation in a loop
    for i := 0; i < 10_000; i++ {
        // Dynamic template that changes with each iteration
        dynamicTemplate := fmt.Sprintf("Iteration %d: User {UserId} performed {Action} at {Timestamp}", i)

        // Check cache first
        if cached, ok := cache.Get(dynamicTemplate); ok {
            _ = cached
            continue
        }

        // Parse if not cached
        parsed, err := Parse(dynamicTemplate)
        if err != nil {
            t.Fatalf("Failed to parse template at iteration %d: %v", i, err)
        }

        if parsed == nil {
            t.Fatal("Got nil template")
        }

        // Store in cache
        cache.Put(dynamicTemplate, parsed)
    }

    // Verify cache is still bounded
    stats := cache.Stats()
    if stats.Size > 50 {
        t.Errorf("Cache size %d exceeds max size 50", stats.Size)
    }

    // Should have significant evictions
    if stats.Evictions < 9900 {
        t.Errorf("Expected at least 9900 evictions, got %d", stats.Evictions)
    }

    t.Logf("After 10,000 dynamic templates - Size: %d, Evictions: %d", stats.Size, stats.Evictions)
}

// BenchmarkCacheUnderPressure simulates continuous pressure with unique templates
func BenchmarkCacheUnderPressure(b *testing.B) {
    ResetGlobalCacheForTesting()
    ConfigureGlobalCache(WithMaxSize(1000))
    defer func() {
        ClearCache()
        ResetGlobalCacheForTesting()
    }()

    b.ResetTimer()

    for i := 0; i < b.N; i++ {
        // Each iteration uses a unique template
        template := fmt.Sprintf("Benchmark iteration %d: {Value}", i)
        _, err := ParseCached(template)
        if err != nil {
            b.Fatal(err)
        }
    }

    // Report final stats
    stats := GetCacheStats()
    b.ReportMetric(float64(stats.Evictions), "evictions")
    b.ReportMetric(float64(stats.Size), "final_size")
}