package models

import (
	"context"
	"sync/atomic"
	"testing"
	"time"
)

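// MockAgent is a test double that counts how many times its methods are
// invoked; the counter is incremented atomically so the mock is safe to use
// across goroutines. Both methods return canned responses.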
type MockAgent struct {
	CallCount int32
}

func (m *MockAgent) Generate(ctx context.Context, prompt string) (any, error) {
	atomic.AddInt32(&m.CallCount, 1)
	return "mock response", nil
}

func (m *MockAgent) GenerateWithFiles(ctx context.Context, prompt string, files []File) (any, error) {
	atomic.AddInt32(&m.CallCount, 1)
	return "mock response with files", nil
}

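// TestCachedLLM_Generate checks that a repeated prompt is answered from the
// cache (the agent is called only once) and that a different prompt triggers
// a new agent call.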
func TestCachedLLM_Generate(t *testing.T) {
	mock := &MockAgent{}
	cached := NewCachedLLM(mock, 10, time.Minute, "")

	ctx := context.Background()
	prompt := "hello"

	// First call - should reach the underlying agent.
	_, err := cached.Generate(ctx, prompt)
	if err != nil {
		t.Fatalf("first call failed: %v", err)
	}
	if count := atomic.LoadInt32(&mock.CallCount); count != 1 {
		t.Errorf("expected 1 call, got %d", count)
	}

	// Second call with the same prompt - should be served from the cache.
	_, err = cached.Generate(ctx, prompt)
	if err != nil {
		t.Fatalf("second call failed: %v", err)
	}
	if count := atomic.LoadInt32(&mock.CallCount); count != 1 {
		t.Errorf("expected 1 call (cached), got %d", count)
	}

	// Different prompt - should miss the cache and reach the agent again.
	_, err = cached.Generate(ctx, "world")
	if err != nil {
		t.Fatalf("third call failed: %v", err)
	}
	if count := atomic.LoadInt32(&mock.CallCount); count != 2 {
		t.Errorf("expected 2 calls, got %d", count)
	}
}

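// TestCachedLLM_GenerateWithFiles checks that caching accounts for attached
// files: repeating the same prompt and files reuses the cached result, while
// changing a file's content triggers a new agent call.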
func TestCachedLLM_GenerateWithFiles(t *testing.T) {
	mock := &MockAgent{}
	cached := NewCachedLLM(mock, 10, time.Minute, "")

	ctx := context.Background()
	prompt := "analyze"
	files := []File{{Name: "a.txt", Data: []byte("content")}}

	// First call - should reach the underlying agent.
	_, err := cached.GenerateWithFiles(ctx, prompt, files)
	if err != nil {
		t.Fatalf("first call failed: %v", err)
	}
	if count := atomic.LoadInt32(&mock.CallCount); count != 1 {
		t.Errorf("expected 1 call, got %d", count)
	}

	// Second call with the same prompt and files - should be served from the cache.
	_, err = cached.GenerateWithFiles(ctx, prompt, files)
	if err != nil {
		t.Fatalf("second call failed: %v", err)
	}
	if count := atomic.LoadInt32(&mock.CallCount); count != 1 {
		t.Errorf("expected 1 call, got %d", count)
	}

	// Same file name but different content - should miss the cache and reach the agent.
	files2 := []File{{Name: "a.txt", Data: []byte("different")}}
	_, err = cached.GenerateWithFiles(ctx, prompt, files2)
	if err != nil {
		t.Fatalf("third call failed: %v", err)
	}
	if count := atomic.LoadInt32(&mock.CallCount); count != 2 {
		t.Errorf("expected 2 calls, got %d", count)
	}
}