forked from signal18/replication-manager
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_failover_semisync_autorejoin_safe_XX_MX_MS.go
79 lines (65 loc) · 2.03 KB
/
test_failover_semisync_autorejoin_safe_XX_MX_MS.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL
// Copyright 2017 Signal 18 SARL
// Authors: Guillaume Lefranc <guillaume@signal18.io>
// Stephane Varoqui <svaroqui@gmail.com>
// This source code is licensed under the GNU General Public License, version 3.
package regtest
import (
"sync"
"time"
"github.com/signal18/replication-manager/cluster"
)
// testFailoverSemisyncAutoRejoinSafeMSMXXXRMXMS is a regression scenario that
// kills first a slave and then the master of a semisync cluster, then restarts
// both in sequence and verifies that automatic rejoin (flashback/dump enabled)
// restores a consistent topology.
//
// Returns true when every restarted node rejoins with working replication and
// the benchmark checksum matches across nodes; false on any failure, which is
// logged at error level.
//
// NOTE(review): the sleeps and the exact ordering of stop/start calls are part
// of the scenario being tested — do not reorder them.
func testFailoverSemisyncAutoRejoinSafeMSMXXXRMXMS(cluster *cluster.Cluster, conf string, test *cluster.Test) bool {
	// Configure automated (non-interactive) failover with semisync and every
	// rejoin strategy enabled; unsafe restart is explicitly disabled ("safe").
	cluster.SetFailoverCtr(0)
	cluster.SetFailSync(false)
	cluster.SetInteractive(false)
	cluster.SetRplChecks(false)
	cluster.SetRejoin(true)
	cluster.SetRejoinFlashback(true)
	cluster.SetRejoinDump(true)
	cluster.EnableSemisync()
	cluster.SetFailTime(0)
	cluster.SetFailRestartUnsafe(false)
	cluster.SetBenchMethod("table")
	// Remember the current master so we can restart it later and compare the
	// final topology against it.
	SaveMaster := cluster.GetMaster()
	cluster.CleanupBench()
	cluster.PrepareBench()
	// Generate write traffic in the background while the scenario unfolds.
	go cluster.RunBench()
	time.Sleep(4 * time.Second)
	// Kill the first slave (it may become master after the failover below).
	SaveMaster2 := cluster.GetSlaves()[0]
	cluster.StopDatabaseService(cluster.GetSlaves()[0])
	time.Sleep(5 * time.Second)
	// More writes so the dead slave falls behind, then kill the master to
	// trigger the failover.
	cluster.RunBench()
	cluster.StopDatabaseService(cluster.GetMaster())
	time.Sleep(15 * time.Second)
	cluster.ForgetTopology()
	// Restart the old master and wait for it to rejoin (as a slave).
	wg2 := new(sync.WaitGroup)
	wg2.Add(1)
	go cluster.WaitRejoin(wg2)
	cluster.StartDatabaseService(SaveMaster)
	wg2.Wait()
	//Recovered as slave first wait that it trigger master failover
	time.Sleep(5 * time.Second)
	cluster.RunBench()
	// Restart the killed slave and wait for it to rejoin as well.
	wg2.Add(1)
	go cluster.WaitRejoin(wg2)
	cluster.StartDatabaseService(SaveMaster2)
	wg2.Wait()
	time.Sleep(5 * time.Second)
	// Every slave must have working replication after the double rejoin.
	for _, s := range cluster.GetSlaves() {
		if s.IsReplicationBroken() {
			cluster.LogPrintf(LvlErr, "Slave %s issue on replication", s.URL)
			return false
		}
	}
	time.Sleep(10 * time.Second)
	// Data must be identical across nodes once replication has caught up.
	if !cluster.ChecksumBench() {
		cluster.LogPrintf(LvlErr, "Inconsistent slave")
		return false
	}
	// In a 2-node cluster the original master is expected to hold the master
	// role again at the end of the scenario.
	if len(cluster.GetServers()) == 2 && SaveMaster.URL != cluster.GetMaster().URL {
		cluster.LogPrintf(LvlErr, "Unexpected master for 2 nodes cluster")
		return false
	}
	return true
}