Supercharge your Go LLM applications with seamless observability! GoLangfuse is a production-ready, asynchronous client library for the Langfuse API that provides comprehensive LLM observability with enterprise-grade reliability and minimal performance overhead.
Langfuse is an open-source observability platform built specifically for LLM applications. Think of it as your AI app's flight recorder and control panel all in one! Monitor performance, detect issues, evaluate quality, and optimize your LLM applications with powerful analytics.
Built for production Go applications that demand both performance and reliability, GoLangfuse offers:
- Asynchronous Processing - Non-blocking event tracking with configurable worker goroutines
- Intelligent Batching - Configurable batch processing with automatic flushing and timeouts
- Enterprise Reliability - Built-in retry logic with exponential backoff and graceful degradation
- Comprehensive Monitoring - Real-time metrics, health checks, and performance tracking
- Complete LLM Coverage - Track every aspect of your AI interactions:
  - Traces - Complete user sessions and conversation flows
  - Spans - Individual processing steps and operations
  - Generations - LLM API calls with tokens, costs, and performance
  - Scores - Quality metrics and evaluation results
  - Sessions - User interaction grouping and analytics
- Production Features:
  - Automatic request/response compression (gzip)
  - Context-aware cancellation and timeouts
  - Structured error handling with detailed diagnostics
  - Input validation with comprehensive error reporting
  - Graceful shutdown with event flushing
- Zero-Friction Integration - Drop-in solution with minimal configuration
One line and you're ready to go:
```bash
go get github.com/bdpiprava/GoLangfuse
```
For comprehensive documentation, examples, and API reference, visit our GitHub Pages Documentation.
```go
package main

import (
	"context"
	"log"

	"github.com/bdpiprava/GoLangfuse"
	"github.com/bdpiprava/GoLangfuse/config"
	"github.com/bdpiprava/GoLangfuse/types"
)

func main() {
	// Initialize with environment-based configuration
	cfg := &config.Langfuse{
		URL:       "https://api.langfuse.com", // Or your self-hosted instance
		PublicKey: "YOUR_PUBLIC_KEY",
		SecretKey: "YOUR_SECRET_KEY",

		// Production-ready defaults
		NumberOfEventProcessor: 4,    // Concurrent workers
		BatchSize:              10,   // Events per batch
		BatchTimeout:           "5s", // Max wait time
		MaxRetries:             3,    // Retry failed requests
	}

	// Create the client with automatic configuration validation
	client, err := langfuse.NewWithConfig(cfg)
	if err != nil {
		log.Fatalf("Failed to create Langfuse client: %v", err)
	}
	defer client.Shutdown(context.Background()) // Graceful shutdown

	// Track a complete user interaction
	traceEvent := &types.TraceEvent{
		Name:      "ai-assistant-conversation",
		UserID:    "user-123",
		SessionID: "session-456",
		Tags:      []string{"production", "gpt-4"},
		Metadata: map[string]any{
			"app_version": "v2.1.0",
			"channel":     "web",
		},
		Input: map[string]any{
			"query": "How do I optimize my Go application for performance?",
		},
		Output: map[string]any{
			"response": "Here are the key optimization strategies...",
			"sources":  []string{"official-docs", "best-practices"},
		},
	}

	// Fire-and-forget: Events are processed asynchronously
	eventID := client.AddEvent(context.Background(), traceEvent)
	log.Printf("Event queued with ID: %s", eventID)

	// Your application continues without blocking!
}
```
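The ID returned by `AddEvent` is what links spans, generations, and scores back to their parent trace; the pipeline example below uses it to instrument a complete query flow end to end.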
```go
func handleUserQuery(ctx context.Context, client *langfuse.Langfuse, userID, query string) error {
	// 1. Start a trace for the complete operation
	traceEvent := &types.TraceEvent{
		Name:      "llm-query-pipeline",
		UserID:    userID,
		SessionID: getSessionID(ctx),
		Tags:      []string{"production", "pipeline-v2"},
		Input:     map[string]any{"query": query},
	}
	traceID := client.AddEvent(ctx, traceEvent)

	// 2. Track preprocessing step
	spanEvent := &types.SpanEvent{
		TraceID:   traceID.String(),
		Name:      "query-preprocessing",
		StartTime: time.Now(),
		Input:     map[string]any{"raw_query": query},
		Metadata:  map[string]any{"step": "preprocessing"},
	}
	client.AddEvent(ctx, spanEvent)

	// 3. Track LLM generation with comprehensive metrics
	genStart := time.Now()
	response, usage := callLLMProvider(ctx, query) // Your LLM call

	genEvent := &types.GenerationEvent{
		TraceID:   traceID.String(),
		Name:      "openai-gpt4-generation",
		Model:     "gpt-4-turbo",
		StartTime: genStart,
		EndTime:   time.Now(),
		Input: map[string]any{
			"messages": []map[string]any{
				{"role": "user", "content": query},
			},
			"temperature": 0.7,
			"max_tokens":  2000,
		},
		Output: map[string]any{
			"response":      response,
			"finish_reason": "stop",
		},
		Usage: types.Usage{
			Input:      usage.PromptTokens,
			Output:     usage.CompletionTokens,
			Total:      usage.TotalTokens,
			Unit:       types.Tokens,
			InputCost:  calculateCost(usage.PromptTokens, "input"),
			OutputCost: calculateCost(usage.CompletionTokens, "output"),
			TotalCost:  calculateTotalCost(usage),
		},
	}
	client.AddEvent(ctx, genEvent)

	// 4. Track quality metrics
	scoreEvent := &types.ScoreEvent{
		TraceID: traceID.String(),
		Name:    "response-quality",
		Value:   calculateQualityScore(response),
		Comment: "Automated quality assessment",
	}
	client.AddEvent(ctx, scoreEvent)

	return nil
}
```
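The helpers used above (`getSessionID`, `callLLMProvider`, `calculateCost`, `calculateTotalCost`, `calculateQualityScore`) are application-specific and not part of GoLangfuse. A minimal set of hypothetical stubs that would let the example compile might look like:

```go
// Hypothetical stubs for the application-specific helpers above.
// Replace each with your real session, provider, pricing, and eval logic.
type tokenUsage struct {
	PromptTokens, CompletionTokens, TotalTokens int
}

func getSessionID(ctx context.Context) string { return "session-456" }

func callLLMProvider(ctx context.Context, query string) (string, tokenUsage) {
	// Call your LLM provider here; the usage numbers are placeholders.
	return "Here are the key optimization strategies...", tokenUsage{45, 287, 332}
}

// Illustrative per-token pricing; substitute your model's real rates.
func calculateCost(tokens int, kind string) float64 {
	rates := map[string]float64{"input": 0.00003, "output": 0.00006}
	return float64(tokens) * rates[kind]
}

func calculateTotalCost(u tokenUsage) float64 {
	return calculateCost(u.PromptTokens, "input") + calculateCost(u.CompletionTokens, "output")
}

func calculateQualityScore(response string) float64 { return 0.9 } // placeholder heuristic
```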
For high-throughput deployments, supply a tuned HTTP client and more aggressive batching:

```go
import (
	"net/http"
	"os"
	"time"

	"github.com/bdpiprava/GoLangfuse"
	"github.com/bdpiprava/GoLangfuse/config"
)

func createProductionClient() (*langfuse.Langfuse, error) {
	// Custom HTTP client for high-throughput scenarios
	httpClient := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns:          100,
			MaxIdleConnsPerHost:   20,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ResponseHeaderTimeout: 10 * time.Second,
		},
	}

	cfg := &config.Langfuse{
		URL:       "https://api.langfuse.com",
		PublicKey: os.Getenv("LANGFUSE_PUBLIC_KEY"),
		SecretKey: os.Getenv("LANGFUSE_SECRET_KEY"),

		// High-throughput settings
		NumberOfEventProcessor: 8,    // More workers for concurrency
		BatchSize:              50,   // Larger batches
		BatchTimeout:           "2s", // Faster flushing
		MaxRetries:             5,    // More resilience
		RetryDelay:             "1s", // Retry timing

		// Production settings
		EnableGzip:    true, // Compress large payloads
		EnableMetrics: true, // Performance monitoring
	}

	return langfuse.NewWithClient(cfg, httpClient)
}
```
Alternatively, load configuration entirely from the environment:

```go
import (
	"context"
	"log"

	"github.com/bdpiprava/GoLangfuse"
	"github.com/joho/godotenv"
)

func init() {
	// Load from .env file in development
	godotenv.Load()
}

func main() {
	// Configuration loaded automatically from environment variables
	client, err := langfuse.NewFromEnv()
	if err != nil {
		log.Fatalf("Failed to initialize Langfuse: %v", err)
	}
	defer client.Shutdown(context.Background())

	// Client is ready to use with production-grade defaults
}
```
Supported environment variables:

```bash
# Core configuration
LANGFUSE_URL=https://api.langfuse.com
LANGFUSE_PUBLIC_KEY=pk_your_public_key
LANGFUSE_SECRET_KEY=sk_your_secret_key

# Performance tuning (optional)
LANGFUSE_NUM_OF_EVENT_PROCESSOR=4
LANGFUSE_BATCH_SIZE=10
LANGFUSE_BATCH_TIMEOUT=5s
LANGFUSE_MAX_RETRIES=3
LANGFUSE_RETRY_DELAY=1s

# Features (optional)
LANGFUSE_ENABLE_GZIP=true
LANGFUSE_ENABLE_METRICS=true
```
GoLangfuse supports all Langfuse event types with full type safety and validation:
Trace events are containers for complete user interactions or request flows:
```go
traceEvent := &types.TraceEvent{
	Name:      "ai-chat-session",
	UserID:    "user-123",
	SessionID: "session-456",
	Tags:      []string{"production", "chat-v2"},
	Metadata: map[string]any{
		"client_version": "2.1.0",
		"experiment_id":  "exp-789",
	},
	Input:       map[string]any{"initial_query": "Hello!"},
	Output:      map[string]any{"final_response": "Goodbye!"},
	Public:      false,
	Environment: "production",
}
```
Span events capture individual processing steps within traces:
```go
spanEvent := &types.SpanEvent{
	TraceID:             traceID.String(),
	ParentObservationID: parentSpanID, // For nested spans
	Name:                "document-retrieval",
	StartTime:           time.Now(),
	EndTime:             time.Now().Add(500 * time.Millisecond),
	Input:               map[string]any{"query": "search terms"},
	Output:              map[string]any{"documents": []string{"doc1", "doc2"}},
	Metadata:            map[string]any{"search_engine": "elasticsearch"},
	Level:               types.Info, // Default, Warn, Error
	StatusMessage:       "Retrieved 15 relevant documents",
}
```
Generation events capture LLM API calls with comprehensive tracking:
```go
genEvent := &types.GenerationEvent{
	TraceID: traceID.String(),
	Name:    "gpt4-chat-completion",
	Model:   "gpt-4-1106-preview",
	ModelParameters: map[string]any{
		"temperature": 0.7,
		"max_tokens":  1000,
		"top_p":       0.9,
	},
	Input: []map[string]any{
		{"role": "user", "content": "Explain quantum computing"},
	},
	Output: map[string]any{
		"content":       "Quantum computing is...",
		"finish_reason": "stop",
	},
	StartTime:           startTime,
	EndTime:             endTime,
	CompletionStartTime: &firstTokenTime, // For streaming
	Usage: types.Usage{
		Input:      45,  // Prompt tokens
		Output:     287, // Completion tokens
		Total:      332, // Total tokens
		Unit:       types.Tokens,
		InputCost:  0.00135, // Cost in USD
		OutputCost: 0.00861,
		TotalCost:  0.00996,
	},
	Metadata: map[string]any{
		"api_version": "2023-12-01",
		"stream":      false,
	},
}
```
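For streaming calls, `CompletionStartTime` lets you record time-to-first-token. A sketch of how those timestamps might be captured, where `stream` stands in for your provider SDK's chunk channel (it is not part of GoLangfuse):

```go
// Sketch: measuring time-to-first-token while consuming a stream.
// `stream` is a hypothetical <-chan string from your provider's SDK.
startTime := time.Now()
var firstTokenTime time.Time
var buf strings.Builder
for chunk := range stream {
	if firstTokenTime.IsZero() {
		firstTokenTime = time.Now() // first chunk arrived
	}
	buf.WriteString(chunk)
}
endTime := time.Now()
// startTime, endTime, and &firstTokenTime feed the GenerationEvent fields shown above.
```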
Score events record quality and performance metrics:
```go
scoreEvent := &types.ScoreEvent{
	TraceID:       traceID.String(),
	ObservationID: generationID.String(), // Link to specific generation
	Name:          "response-quality",
	Value:         0.92, // 0-1 for quality scores
	Comment:       "High quality, factually accurate response",
	ConfigID:      "quality-eval-v2",
	DataType:      types.Numeric,    // Numeric, Categorical, Boolean
	Source:        types.Annotation, // API, Annotation, Review
}

// Categorical score example
categoryScore := &types.ScoreEvent{
	TraceID:     traceID.String(),
	Name:        "sentiment",
	StringValue: "positive", // For categorical scores
	DataType:    types.Categorical,
	Source:      types.API,
}
```
Session events group user interactions for management and analytics:
```go
sessionEvent := &types.SessionEvent{
	Name:   "customer-support-session",
	UserID: "user-123",
	Metadata: map[string]any{
		"support_tier":   "premium",
		"issue_category": "billing",
		"agent_id":       "agent-456",
	},
	Public: false,
}
```
- Intelligent Batching: Events are automatically batched for optimal API efficiency
- Configurable Workers: Multiple goroutines process events concurrently
- Graceful Shutdown: `client.Shutdown()` ensures all events are flushed before exit (see the sketch after these lists)
- Health Monitoring: Built-in metrics track queue depth, processing rates, and errors
- Exponential Backoff: Automatic retry with configurable delays and max attempts
- Circuit Breaker: Fails fast when API is consistently unavailable
- Structured Errors: Detailed error information for debugging and monitoring
- Input Validation: Comprehensive validation with helpful error messages
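For long-running services, a common pattern is to trigger the flush from OS signal handling. A minimal sketch, where the signal wiring and the 10-second drain window are illustrative choices rather than library defaults:

```go
// Flush queued events on SIGINT/SIGTERM before the process exits.
stop := make(chan os.Signal, 1)
signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)
<-stop

// Give the client a bounded window to drain its queue.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
client.Shutdown(ctx)
```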
```go
// Get client metrics
metrics := client.GetMetrics()
fmt.Printf("Queue depth: %d\n", metrics.QueueDepth)
fmt.Printf("Events sent: %d\n", metrics.EventsSent)
fmt.Printf("Errors: %d\n", metrics.Errors)

// Health check
if !client.IsHealthy() {
	log.Println("Langfuse client is not healthy")
}
```
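One way to surface these numbers in a running service is a lightweight polling goroutine; in this sketch the interval and queue-depth threshold are illustrative values, not library defaults:

```go
// Periodically log client health; tune interval and threshold to your workload.
go func() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		m := client.GetMetrics()
		if !client.IsHealthy() || m.QueueDepth > 1000 {
			log.Printf("langfuse: queue depth=%d sent=%d errors=%d",
				m.QueueDepth, m.EventsSent, m.Errors)
		}
	}
}()
```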
To work on GoLangfuse locally you'll need:

- Go 1.21 or higher
- Docker and Docker Compose (for integration tests)
```bash
# Clone the repository
git clone https://github.com/bdpiprava/GoLangfuse.git
cd GoLangfuse

# Install dependencies
go mod download

# Start local Langfuse server (for integration tests)
docker-compose up -d

# Run unit tests
make test

# Run integration tests (requires running Langfuse server)
make test-integration

# Run all tests with coverage
make test-coverage

# Lint the code
make lint

# Build the library
make build
```
All available make targets:

```bash
make test              # Run unit tests
make test-integration  # Run integration tests
make test-coverage     # Run tests with coverage report
make lint              # Run golangci-lint
make build             # Build the library
make clean             # Clean build artifacts
make docker-up         # Start local Langfuse server
make docker-down       # Stop local Langfuse server
```
```
/
├── config/            # Configuration management
├── types/             # Event type definitions
├── logger/            # Logging utilities
├── mock/              # Test mocks
├── test-integration/  # Integration test suite
├── vendor/            # Vendored dependencies
├── client.go          # HTTP client implementation
├── langfuse.go        # Main service logic
├── errors.go          # Error handling
├── metrics.go         # Performance monitoring
└── *_test.go          # Unit tests
```
We welcome contributions! Please see our contribution guidelines:
1. Fork and Clone

   Fork the repository on GitHub, then:

   ```bash
   git clone https://github.com/YOUR_USERNAME/GoLangfuse.git
   cd GoLangfuse
   ```

2. Create a Feature Branch

   ```bash
   git checkout -b feature/your-feature-name
   ```

3. Make Changes

   - Follow Go conventions and existing code style
   - Add tests for new functionality
   - Update documentation as needed
   - Run `make lint` and `make test` before committing

4. Commit and Push

   ```bash
   git commit -m "feat: add your feature description"
   git push origin feature/your-feature-name
   ```

5. Create a Pull Request

   - Provide a clear description of changes
   - Link any related issues
   - Ensure CI passes
- Bug Fixes: Help us maintain reliability
- Features: Add new Langfuse capabilities
- Documentation: Improve examples and guides
- Performance: Optimize client performance
- Testing: Expand test coverage
- Dependencies: Updates and security patches
GoLangfuse is built for production workloads:
- Throughput: 10,000+ events/second on standard hardware
- Latency: <1ms overhead for event queuing (fire-and-forget)
- Memory: Minimal memory footprint with configurable batching
- Reliability: 99.9%+ delivery rate with retry mechanisms
- Langfuse - The core observability platform
- Langfuse Python SDK - Python client
- Langfuse JS SDK - JavaScript client
This project is licensed under the Apache License 2.0 - see the LICENSE file for details.
- The Langfuse team for building an amazing observability platform
- The Go community for excellent tooling and best practices
- All contributors who help make this library better
Built with ❤️ for production Go applications. Ready to supercharge your LLM observability? Get started now!