-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathschedulers.go
188 lines (161 loc) · 5.46 KB
/
schedulers.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package jobs
import (
"errors"
"fmt"
"time"
"github.com/cjdelisle/matterfoss-server/v6/model"
"github.com/cjdelisle/matterfoss-server/v6/shared/mlog"
)
// Schedulers drives the set of registered job schedulers: a background
// goroutine (started by Start) polls once a minute and asks each enabled
// scheduler to enqueue its job when its next run time has passed. It also
// reacts to config changes and cluster-leadership changes delivered over
// the channels below.
type Schedulers struct {
	stop                 chan bool          // closed by Stop to signal the polling goroutine to exit
	stopped              chan bool          // closed by the goroutine once it has fully exited
	configChanged        chan *model.Config // receives new configs from handleConfigChange
	clusterLeaderChanged chan bool          // receives leadership transitions from handleClusterLeaderChange
	listenerId           string             // id of the config listener registered in Start; cleared in Stop
	jobs                 *JobServer         // job server used to query pending/last-successful jobs and config
	isLeader             bool               // whether this node is the cluster leader; only the leader schedules jobs
	running              bool               // set true by Start, false by Stop

	schedulers   map[string]model.Scheduler // registered schedulers, keyed by name (job type)
	nextRunTimes map[string]*time.Time      // next due time per scheduler; nil means "do not run"
}
// Sentinel errors reported by callers that need the schedulers to be in a
// particular lifecycle state.
var (
	// ErrSchedulersNotRunning indicates an operation that requires Start to
	// have been called first.
	ErrSchedulersNotRunning = errors.New("job schedulers are not running")
	// ErrSchedulersRunning indicates an operation that must not be performed
	// while the schedulers are running.
	ErrSchedulersRunning = errors.New("job schedulers are running")
	// ErrSchedulersUninitialized indicates the Schedulers struct has not been
	// set up yet.
	ErrSchedulersUninitialized = errors.New("job schedulers are not initialized")
)
// AddScheduler registers a scheduler under the given name (the job type).
// It writes directly to the schedulers map with no locking visible here, so
// it is presumably meant to be called before Start — NOTE(review): confirm
// callers never invoke this concurrently with the running poll loop.
func (schedulers *Schedulers) AddScheduler(name string, scheduler model.Scheduler) {
	schedulers.schedulers[name] = scheduler
}
// Start starts the schedulers. This call is not safe for concurrent use.
// Synchronization should be implemented by the caller.
//
// It spawns a single goroutine that wakes once a minute, runs any scheduler
// whose next run time has passed (leader only), and recomputes run times
// whenever the config or cluster leadership changes.
func (schedulers *Schedulers) Start() {
	schedulers.stop = make(chan bool)
	schedulers.stopped = make(chan bool)

	// Subscribe to config updates; handleConfigChange forwards each new
	// config into the configChanged channel consumed by the loop below.
	schedulers.listenerId = schedulers.jobs.ConfigService.AddConfigListener(schedulers.handleConfigChange)

	go func() {
		mlog.Info("Starting schedulers.")
		defer func() {
			mlog.Info("Schedulers stopped.")
			// Closing stopped unblocks Stop, which waits on this channel.
			close(schedulers.stopped)
		}()

		// Seed the next run time for every enabled scheduler; disabled ones
		// are marked nil, which the poll loop skips.
		now := time.Now()
		for name, scheduler := range schedulers.schedulers {
			if !scheduler.Enabled(schedulers.jobs.Config()) {
				schedulers.nextRunTimes[name] = nil
			} else {
				schedulers.setNextRunTime(schedulers.jobs.Config(), name, now, false)
			}
		}

		for {
			// A fresh timer per iteration; it is stopped at the bottom of the
			// loop (or on exit) so it never fires into a stale select.
			timer := time.NewTimer(1 * time.Minute)
			select {
			case <-schedulers.stop:
				mlog.Debug("Schedulers received stop signal.")
				timer.Stop()
				return
			case now = <-timer.C:
				// Minute tick: run every scheduler whose due time has passed.
				cfg := schedulers.jobs.Config()

				for name, nextTime := range schedulers.nextRunTimes {
					if nextTime == nil {
						continue
					}

					// NOTE(review): compares against time.Now() rather than
					// the `now` just read from the timer — effectively the
					// same instant, but inconsistent with the rest of the loop.
					if time.Now().After(*nextTime) {
						scheduler := schedulers.schedulers[name]
						// Only the cluster leader schedules, and only for
						// schedulers still enabled under the current config.
						if scheduler == nil || !schedulers.isLeader || !scheduler.Enabled(cfg) {
							continue
						}
						if _, err := schedulers.scheduleJob(cfg, name, scheduler); err != nil {
							mlog.Error("Failed to schedule job", mlog.String("scheduler", name), mlog.Err(err))
							continue
						}
						// pendingJobs=true: the job we just scheduled counts
						// as pending for the next-run-time computation.
						schedulers.setNextRunTime(cfg, name, now, true)
					}
				}
			case newCfg := <-schedulers.configChanged:
				// Config changed: recompute (or clear) every run time.
				for name, scheduler := range schedulers.schedulers {
					if !schedulers.isLeader || !scheduler.Enabled(newCfg) {
						schedulers.nextRunTimes[name] = nil
					} else {
						schedulers.setNextRunTime(newCfg, name, now, false)
					}
				}
			case isLeader := <-schedulers.clusterLeaderChanged:
				// Leadership changed: losing leadership clears all run times;
				// gaining it recomputes them.
				for name := range schedulers.schedulers {
					// NOTE(review): this assignment is loop-invariant and is
					// re-executed once per scheduler; setting it once before
					// the loop would be equivalent.
					schedulers.isLeader = isLeader
					if !isLeader {
						schedulers.nextRunTimes[name] = nil
					} else {
						schedulers.setNextRunTime(schedulers.jobs.Config(), name, now, false)
					}
				}
			}
			timer.Stop()
		}
	}()

	schedulers.running = true
}
// Stop stops the schedulers. This call is not safe for concurrent use.
// Synchronization should be implemented by the caller.
//
// The order here matters: closing stop signals the goroutine, and the
// receive on stopped blocks until it has fully exited before the config
// listener is removed and state is reset.
func (schedulers *Schedulers) Stop() {
	mlog.Info("Stopping schedulers.")
	close(schedulers.stop)
	<-schedulers.stopped
	schedulers.jobs.ConfigService.RemoveConfigListener(schedulers.listenerId)
	schedulers.listenerId = ""
	schedulers.running = false
}
// setNextRunTime recomputes and stores the next due time for the named
// scheduler. When pendingJobs is false the job server is queried for
// pending jobs of this type; either query failing clears the run time
// (nil = "do not run") so the scheduler is effectively paused until the
// next recomputation.
func (schedulers *Schedulers) setNextRunTime(cfg *model.Config, name string, now time.Time, pendingJobs bool) {
	sched := schedulers.schedulers[name]

	// The caller may already know a job is pending (it just scheduled one);
	// only hit the store when it does not.
	if !pendingJobs {
		hasPending, appErr := schedulers.jobs.CheckForPendingJobsByType(name)
		if appErr != nil {
			mlog.Error("Failed to set next job run time", mlog.Err(appErr))
			schedulers.nextRunTimes[name] = nil
			return
		}
		pendingJobs = hasPending
	}

	lastSuccess, appErr := schedulers.jobs.GetLastSuccessfulJobByType(name)
	if appErr != nil {
		mlog.Error("Failed to set next job run time", mlog.Err(appErr))
		schedulers.nextRunTimes[name] = nil
		return
	}

	next := sched.NextScheduleTime(cfg, now, pendingJobs, lastSuccess)
	schedulers.nextRunTimes[name] = next
	mlog.Debug("Next run time for scheduler", mlog.String("scheduler_name", name), mlog.String("next_runtime", fmt.Sprintf("%v", next)))
}
// scheduleJob asks the given scheduler to enqueue its job, supplying the
// current pending-job state and the last successful job of this type.
// It returns the scheduled job (which may be nil if the scheduler declined
// to schedule one) or the first error encountered while querying the store.
func (schedulers *Schedulers) scheduleJob(cfg *model.Config, name string, scheduler model.Scheduler) (*model.Job, *model.AppError) {
	pendingJobs, err := schedulers.jobs.CheckForPendingJobsByType(name)
	if err != nil {
		return nil, err
	}

	lastSuccessfulJob, err := schedulers.jobs.GetLastSuccessfulJobByType(name)
	if err != nil {
		// Bug fix: this branch previously returned the error from the
		// pending-jobs check above — which is necessarily nil here — so a
		// failure of GetLastSuccessfulJobByType produced (nil, nil) and the
		// caller treated the job as successfully scheduled.
		return nil, err
	}

	return scheduler.ScheduleJob(cfg, pendingJobs, lastSuccessfulJob)
}
// handleConfigChange is the config-listener callback. It forwards the new
// configuration to the scheduler goroutine; if the schedulers are shutting
// down (stop is closed) the send is abandoned rather than blocking forever.
func (schedulers *Schedulers) handleConfigChange(_, newConfig *model.Config) {
	mlog.Debug("Schedulers received config change.")

	select {
	case <-schedulers.stop:
		// Shutting down: no one is reading configChanged anymore; drop it.
	case schedulers.configChanged <- newConfig:
	}
}
// handleClusterLeaderChange forwards a cluster-leadership transition to the
// scheduler goroutine without ever blocking on a full channel: if the
// buffered channel is occupied by a stale value, that value is discarded
// and replaced with the latest one.
func (schedulers *Schedulers) handleClusterLeaderChange(isLeader bool) {
	select {
	case schedulers.clusterLeaderChanged <- isLeader:
	default:
		mlog.Debug("Sending cluster leader change message to schedulers failed.")

		// Drain the buffered channel to make room for the latest change.
		// NOTE(review): assumes clusterLeaderChanged has capacity >= 1; the
		// make() site is outside this view — confirm.
		select {
		case <-schedulers.clusterLeaderChanged:
		default:
		}

		// Enqueue the latest change. This operation is safe due to this method
		// being called under lock.
		schedulers.clusterLeaderChanged <- isLeader
	}
}