Alerting: Add a feature flag to periodically save states (grafana#80987)
JohnnyQQQQ authored Jan 23, 2024
1 parent f7fd8e6 commit aa25776
Showing 14 changed files with 81 additions and 23 deletions.
4 changes: 4 additions & 0 deletions conf/defaults.ini
@@ -1180,6 +1180,10 @@ min_interval = 10s
# (concurrent queries per rule disabled).
max_state_save_concurrency = 1

# If the feature flag 'alertingSaveStatePeriodic' is enabled, this is the interval that is used to persist the alerting instances to the database.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
state_periodic_save_interval = 5m

[unified_alerting.screenshots]
# Enable screenshots in notifications. You must have either installed the Grafana image rendering
# plugin, or set up Grafana to use a remote rendering service.
9 changes: 9 additions & 0 deletions conf/sample.ini
@@ -1112,6 +1112,15 @@
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;min_interval = 10s

# This is an experimental option to add parallelization to saving alert states in the database.
# It configures the maximum number of concurrent queries per rule evaluated. The default value is 1
# (concurrent queries per rule disabled).
;max_state_save_concurrency = 1

# If the feature flag 'alertingSaveStatePeriodic' is enabled, this is the interval that is used to persist the alerting instances to the database.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;state_periodic_save_interval = 5m

[unified_alerting.reserved_labels]
# Comma-separated list of reserved labels added by the Grafana Alerting engine that should be disabled.
# For example: `disabled_labels=grafana_folder`
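As a quick usage note: with this change, enabling the new behaviour in a local grafana.ini would look roughly like the snippet below. This is a sketch that assumes the standard [feature_toggles] enable key; the 1m value is purely illustrative.

[feature_toggles]
enable = alertingSaveStatePeriodic

[unified_alerting]
# Persist all alert instances on this interval, asynchronously to rule evaluation.
state_periodic_save_interval = 1m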
1 change: 1 addition & 0 deletions packages/grafana-data/src/types/featureToggles.gen.ts
@@ -177,4 +177,5 @@ export interface FeatureToggles {
jitterAlertRules?: boolean;
jitterAlertRulesWithinGroups?: boolean;
onPremToCloudMigrations?: boolean;
alertingSaveStatePeriodic?: boolean;
}
8 changes: 8 additions & 0 deletions pkg/services/featuremgmt/registry.go
@@ -1354,5 +1354,13 @@ var (
Owner: grafanaOperatorExperienceSquad,
Created: time.Date(2024, time.January, 22, 3, 30, 00, 00, time.UTC),
},
{
Name: "alertingSaveStatePeriodic",
Description: "Writes the state periodically to the database, asynchronous to rule evaluation",
Stage: FeatureStagePrivatePreview,
FrontendOnly: false,
Owner: grafanaAlertingSquad,
Created: time.Date(2024, time.January, 22, 12, 0, 0, 0, time.UTC),
},
}
)
1 change: 1 addition & 0 deletions pkg/services/featuremgmt/toggles_gen.csv
@@ -158,3 +158,4 @@ newFolderPicker,experimental,@grafana/grafana-frontend-platform,2024-01-12,false
jitterAlertRules,experimental,@grafana/alerting-squad,2024-01-17,false,false,true,false
jitterAlertRulesWithinGroups,experimental,@grafana/alerting-squad,2024-01-17,false,false,true,false
onPremToCloudMigrations,experimental,@grafana/grafana-operator-experience-squad,2024-01-22,false,false,false,false
alertingSaveStatePeriodic,privatePreview,@grafana/alerting-squad,2024-01-22,false,false,false,false
4 changes: 4 additions & 0 deletions pkg/services/featuremgmt/toggles_gen.go
@@ -642,4 +642,8 @@ const (
// FlagOnPremToCloudMigrations
// In-development feature that will allow users to easily migrate their on-prem Grafana instances to Grafana Cloud.
FlagOnPremToCloudMigrations = "onPremToCloudMigrations"

// FlagAlertingSaveStatePeriodic
// Writes the state periodically to the database, asynchronous to rule evaluation
FlagAlertingSaveStatePeriodic = "alertingSaveStatePeriodic"
)
14 changes: 12 additions & 2 deletions pkg/services/ngalert/metrics/state.go
@@ -6,8 +6,9 @@ import (
)

type State struct {
StateUpdateDuration prometheus.Histogram
r prometheus.Registerer
StateUpdateDuration prometheus.Histogram
StateFullSyncDuration prometheus.Histogram
r prometheus.Registerer
}

// Registerer exposes the Prometheus registerer directly. The state package needs this, as it uses a collector to fetch the current alerts by state in the system.
@@ -27,5 +28,14 @@ func NewStateMetrics(r prometheus.Registerer) *State {
Buckets: []float64{0.01, 0.1, 1, 2, 5, 10},
},
),
StateFullSyncDuration: promauto.With(r).NewHistogram(
prometheus.HistogramOpts{
Namespace: Namespace,
Subsystem: Subsystem,
Name: "state_full_sync_duration_seconds",
Help: "The duration of fully synchronizing the state with the database.",
Buckets: []float64{0.01, 0.1, 1, 2, 5, 10, 60},
},
),
}
}
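The new StateFullSyncDuration histogram follows the usual client_golang pattern: create it once via promauto against a registry, then Observe the elapsed seconds around the timed operation, which is exactly what the fullSync change further down in this commit does. A small standalone sketch of that pattern, with generic values rather than the real Grafana metrics wiring:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Buckets mirror the ones added in this commit: sub-second buckets plus a 60s worst case.
	fullSyncDuration := promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
		Namespace: "grafana",
		Subsystem: "alerting",
		Name:      "state_full_sync_duration_seconds",
		Help:      "The duration of fully synchronizing the state with the database.",
		Buckets:   []float64{0.01, 0.1, 1, 2, 5, 10, 60},
	})

	start := time.Now()
	time.Sleep(20 * time.Millisecond) // stand-in for the real full sync work
	fullSyncDuration.Observe(time.Since(start).Seconds())

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), "count:", mf.GetMetric()[0].GetHistogram().GetSampleCount())
	}
}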
10 changes: 9 additions & 1 deletion pkg/services/ngalert/ngalert.go
@@ -296,7 +296,12 @@ func (ng *AlertNG) init() error {
Tracer: ng.tracer,
Log: log.New("ngalert.state.manager"),
}
statePersister := state.NewSyncStatePersisiter(log.New("ngalert.state.manager.persist"), cfg)
logger := log.New("ngalert.state.manager.persist")
statePersister := state.NewSyncStatePersisiter(logger, cfg)
if ng.FeatureToggles.IsEnabledGlobally(featuremgmt.FlagAlertingSaveStatePeriodic) {
ticker := clock.New().Ticker(ng.Cfg.UnifiedAlerting.StatePeriodicSaveInterval)
statePersister = state.NewAsyncStatePersister(logger, ticker, cfg)
}
stateManager := state.NewManager(cfg, statePersister)
scheduler := schedule.NewScheduler(schedCfg, stateManager)

@@ -423,6 +428,9 @@ func (ng *AlertNG) Run(ctx context.Context) error {
children.Go(func() error {
return ng.schedule.Run(subCtx)
})
children.Go(func() error {
return ng.stateManager.Run(subCtx)
})
}
return children.Wait()
}
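In ngalert.go the change amounts to: build the persister based on the feature flag (sync by default, the new ticker-driven async one when alertingSaveStatePeriodic is on), and run the state manager as one more errgroup child alongside the scheduler. A minimal, self-contained sketch of that run-group wiring, with illustrative names rather than the real AlertNG types:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// runner is anything with a blocking Run method, like the scheduler or the state manager.
type runner interface {
	Run(ctx context.Context) error
}

type printRunner struct{ name string }

func (p printRunner) Run(ctx context.Context) error {
	fmt.Println(p.name, "started")
	<-ctx.Done() // block until shutdown, as schedule.Run and stateManager.Run do
	fmt.Println(p.name, "stopped")
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()

	children, subCtx := errgroup.WithContext(ctx)
	for _, r := range []runner{printRunner{"scheduler"}, printRunner{"state manager"}} {
		r := r // capture loop variable for the closure
		children.Go(func() error { return r.Run(subCtx) })
	}
	if err := children.Wait(); err != nil {
		fmt.Println("run group error:", err)
	}
}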
7 changes: 6 additions & 1 deletion pkg/services/ngalert/state/manager.go
@@ -30,7 +30,7 @@ type AlertInstanceManager interface {
}

type StatePersister interface {
Async(ctx context.Context, ticker *clock.Ticker, cache *cache)
Async(ctx context.Context, cache *cache)
Sync(ctx context.Context, span trace.Span, states, staleStates []StateTransition)
}

@@ -103,6 +103,11 @@ func NewManager(cfg ManagerCfg, statePersister StatePersister) *Manager {
return m
}

func (st *Manager) Run(ctx context.Context) error {
st.persister.Async(ctx, st.cache)
return nil
}

func (st *Manager) Warm(ctx context.Context, rulesReader RuleReader) {
if st.instanceStore == nil {
st.log.Info("Skip warming the state because instance store is not configured")
20 changes: 14 additions & 6 deletions pkg/services/ngalert/state/persister_async.go
@@ -8,27 +8,32 @@ import (
"go.opentelemetry.io/otel/trace"

"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/ngalert/metrics"
)

type AsyncStatePersister struct {
log log.Logger
// doNotSaveNormalState controls whether eval.Normal state is persisted to the database and returned by get methods.
doNotSaveNormalState bool
store InstanceStore
ticker *clock.Ticker
metrics *metrics.State
}

func NewAsyncStatePersister(log log.Logger, cfg ManagerCfg) StatePersister {
func NewAsyncStatePersister(log log.Logger, ticker *clock.Ticker, cfg ManagerCfg) StatePersister {
return &AsyncStatePersister{
log: log,
store: cfg.InstanceStore,
ticker: ticker,
doNotSaveNormalState: cfg.DoNotSaveNormalState,
metrics: cfg.Metrics,
}
}

func (a *AsyncStatePersister) Async(ctx context.Context, ticker *clock.Ticker, cache *cache) {
func (a *AsyncStatePersister) Async(ctx context.Context, cache *cache) {
for {
select {
case <-ticker.C:
case <-a.ticker.C:
if err := a.fullSync(ctx, cache); err != nil {
a.log.Error("Failed to do a full state sync to database", "err", err)
}
@@ -37,7 +42,7 @@ func (a *AsyncStatePersister) Async(ctx context.Context, ticker *clock.Ticker, c
if err := a.fullSync(context.Background(), cache); err != nil {
a.log.Error("Failed to do a full state sync to database", "err", err)
}
ticker.Stop()
a.ticker.Stop()
a.log.Info("State async worker is shut down.")
return
}
@@ -46,13 +51,16 @@ func (a *AsyncStatePersister) Async(ctx context.Context, ticker *clock.Ticker, c

func (a *AsyncStatePersister) fullSync(ctx context.Context, cache *cache) error {
startTime := time.Now()
a.log.Info("Full state sync start")
a.log.Debug("Full state sync start")
instances := cache.asInstances(a.doNotSaveNormalState)
if err := a.store.FullSync(ctx, instances); err != nil {
a.log.Error("Full state sync failed", "duration", time.Since(startTime), "instances", len(instances))
return err
}
a.log.Info("Full state sync done", "duration", time.Since(startTime), "instances", len(instances))
a.log.Debug("Full state sync done", "duration", time.Since(startTime), "instances", len(instances))
if a.metrics != nil {
a.metrics.StateFullSyncDuration.Observe(time.Since(startTime).Seconds())
}
return nil
}

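The worker added above follows a common flush-loop shape: write the full state on every tick, and when the context is cancelled do one final flush (on a fresh context, since the parent is already done) before stopping the ticker. A standalone sketch of that pattern, assuming the same benbjohnson/clock package; the flusher type and its save callback are illustrative, not the Grafana types:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/benbjohnson/clock"
)

// flusher periodically persists a snapshot and flushes once more on shutdown.
type flusher struct {
	ticker *clock.Ticker
	save   func(ctx context.Context) error
}

func (f *flusher) run(ctx context.Context) {
	for {
		select {
		case <-f.ticker.C:
			if err := f.save(ctx); err != nil {
				fmt.Println("periodic save failed:", err)
			}
		case <-ctx.Done():
			// The parent context is cancelled, so use a fresh one for the last write.
			if err := f.save(context.Background()); err != nil {
				fmt.Println("final save failed:", err)
			}
			f.ticker.Stop()
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	f := &flusher{
		ticker: clock.New().Ticker(time.Second),
		save:   func(context.Context) error { fmt.Println("saved"); return nil },
	}
	go f.run(ctx)

	time.Sleep(2500 * time.Millisecond) // let a couple of ticks fire
	cancel()
	time.Sleep(100 * time.Millisecond) // give the final flush a moment to run
}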
12 changes: 4 additions & 8 deletions pkg/services/ngalert/state/persister_async_test.go
@@ -18,7 +18,7 @@ func TestAsyncStatePersister_Async(t *testing.T) {
store := &FakeInstanceStore{}
logger := log.New("async.test")

persister := NewAsyncStatePersister(logger, ManagerCfg{
persister := NewAsyncStatePersister(logger, mockClock.Ticker(1*time.Second), ManagerCfg{
InstanceStore: store,
})

@@ -28,11 +28,9 @@ func TestAsyncStatePersister_Async(t *testing.T) {
cancel()
}()

ticker := mockClock.Ticker(1 * time.Second)

cache := newCache()

go persister.Async(ctx, ticker, cache)
go persister.Async(ctx, cache)

cache.set(&State{
OrgID: 1,
@@ -52,17 +50,15 @@ func TestAsyncStatePersister_Async(t *testing.T) {
store := &FakeInstanceStore{}
logger := log.New("async.test")

persister := NewAsyncStatePersister(logger, ManagerCfg{
persister := NewAsyncStatePersister(logger, mockClock.Ticker(1*time.Second), ManagerCfg{
InstanceStore: store,
})

ctx, cancel := context.WithCancel(context.Background())

ticker := mockClock.Ticker(1 * time.Second)

cache := newCache()

go persister.Async(ctx, ticker, cache)
go persister.Async(ctx, cache)

cache.set(&State{
OrgID: 1,
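These tests drive the ticker with a mock clock, so a tick is delivered only when the test advances virtual time rather than after a real second. A minimal sketch of that technique, assuming the benbjohnson/clock mock that the mockClock variable in the test suggests:

package main

import (
	"fmt"
	"time"

	"github.com/benbjohnson/clock"
)

func main() {
	mockClock := clock.NewMock() // virtual time, frozen until advanced explicitly
	ticker := mockClock.Ticker(1 * time.Second)

	fired := make(chan struct{}, 1)
	go func() {
		<-ticker.C // blocks until the mock clock reaches the tick
		fired <- struct{}{}
	}()

	mockClock.Add(1 * time.Second) // advance virtual time; the ticker fires exactly once

	select {
	case <-fired:
		fmt.Println("tick delivered deterministically")
	case <-time.After(time.Second):
		fmt.Println("tick never arrived")
	}
}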
3 changes: 1 addition & 2 deletions pkg/services/ngalert/state/persister_noop.go
@@ -3,13 +3,12 @@ package state
import (
"context"

"github.com/benbjohnson/clock"
"go.opentelemetry.io/otel/trace"
)

type NoopPersister struct{}

func (n *NoopPersister) Async(_ context.Context, _ *clock.Ticker, _ *cache) {}
func (n *NoopPersister) Async(_ context.Context, _ *cache) {}
func (n *NoopPersister) Sync(_ context.Context, _ trace.Span, _, _ []StateTransition) {}

func NewNoopPersister() StatePersister {
3 changes: 1 addition & 2 deletions pkg/services/ngalert/state/persister_sync.go
@@ -4,7 +4,6 @@ import (
"context"
"time"

"github.com/benbjohnson/clock"
"github.com/grafana/dskit/concurrency"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -31,7 +30,7 @@ func NewSyncStatePersisiter(log log.Logger, cfg ManagerCfg) StatePersister {
}
}

func (a *SyncStatePersister) Async(_ context.Context, _ *clock.Ticker, _ *cache) {
func (a *SyncStatePersister) Async(_ context.Context, _ *cache) {
a.log.Debug("Async: No-Op")
}
func (a *SyncStatePersister) Sync(ctx context.Context, span trace.Span, states, staleStates []StateTransition) {
8 changes: 7 additions & 1 deletion pkg/setting/setting_unified_alerting.go
@@ -98,7 +98,8 @@ type UnifiedAlertingSettings struct {
RemoteAlertmanager RemoteAlertmanagerSettings
Upgrade UnifiedAlertingUpgradeSettings
// MaxStateSaveConcurrency controls the number of goroutines (per rule) that can save alert state in parallel.
MaxStateSaveConcurrency int
MaxStateSaveConcurrency int
StatePeriodicSaveInterval time.Duration
}

// RemoteAlertmanagerSettings contains the configuration needed
@@ -403,6 +404,11 @@ func (cfg *Cfg) ReadUnifiedAlertingSettings(iniFile *ini.File) error {

uaCfg.MaxStateSaveConcurrency = ua.Key("max_state_save_concurrency").MustInt(1)

uaCfg.StatePeriodicSaveInterval, err = gtime.ParseDuration(valueAsString(ua, "state_periodic_save_interval", (time.Minute * 5).String()))
if err != nil {
return err
}

upgrade := iniFile.Section("unified_alerting.upgrade")
uaCfgUpgrade := UnifiedAlertingUpgradeSettings{
CleanUpgrade: upgrade.Key("clean_upgrade").MustBool(false),
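The setting is read with a five-minute default and parsed as a duration string. The real code goes through Grafana's gtime.ParseDuration, which per the config comments also accepts a d (day) suffix; the sketch below shows the same default-then-parse logic using only the standard library, so treat it as an approximation rather than the exact implementation:

package main

import (
	"fmt"
	"time"
)

// parseInterval returns def when the value is unset, otherwise parses it.
// Unlike gtime.ParseDuration used in the actual change, time.ParseDuration
// does not understand the "d" suffix.
func parseInterval(raw string, def time.Duration) (time.Duration, error) {
	if raw == "" {
		return def, nil
	}
	return time.ParseDuration(raw)
}

func main() {
	d, err := parseInterval("", 5*time.Minute)
	fmt.Println(d, err) // 5m0s <nil>

	d, err = parseInterval("30s", 5*time.Minute)
	fmt.Println(d, err) // 30s <nil>
}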
