
Robustness retry failpoint #16094

Merged (1 commit) on Jun 19, 2023
82 changes: 34 additions & 48 deletions tests/robustness/failpoints.go
@@ -67,7 +67,7 @@ var (
RaftAfterWALReleasePanic Failpoint = goPanicFailpoint{"raftAfterWALRelease", triggerBlackhole{waitTillSnapshot: true}, Follower}
RaftBeforeSaveSnapPanic Failpoint = goPanicFailpoint{"raftBeforeSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
RaftAfterSaveSnapPanic Failpoint = goPanicFailpoint{"raftAfterSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
RandomFailpoint Failpoint = randomFailpoint{[]Failpoint{
allFailpoints = []Failpoint{
KillFailpoint, BeforeCommitPanic, AfterCommitPanic, RaftBeforeSavePanic, RaftAfterSavePanic,
DefragBeforeCopyPanic, DefragBeforeRenamePanic, BackendBeforePreCommitHookPanic, BackendAfterPreCommitHookPanic,
BackendBeforeStartDBTxnPanic, BackendAfterStartDBTxnPanic, BackendBeforeWritebackBufPanic,
Expand All @@ -76,22 +76,40 @@ var (
CompactAfterCommitBatchPanic, RaftBeforeLeaderSendPanic, BlackholePeerNetwork, DelayPeerNetwork,
RaftBeforeFollowerSendPanic, RaftBeforeApplySnapPanic, RaftAfterApplySnapPanic, RaftAfterWALReleasePanic,
RaftBeforeSaveSnapPanic, RaftAfterSaveSnapPanic, BlackholeUntilSnapshot,
}}
}
)

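// pickRandomFailpoint returns a randomly selected failpoint that is
// available on every member of the cluster; it reports a test error if no
// failpoint qualifies.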
func pickRandomFailpoint(t *testing.T, clus *e2e.EtcdProcessCluster) Failpoint {
availableFailpoints := make([]Failpoint, 0, len(allFailpoints))
for _, failpoint := range allFailpoints {
err := validateFailpoint(clus, failpoint)
if err != nil {
continue
}
availableFailpoints = append(availableFailpoints, failpoint)
}
if len(availableFailpoints) == 0 {
t.Errorf("No available failpoints")
return nil
}
return availableFailpoints[rand.Int()%len(availableFailpoints)]
}

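// validateFailpoint returns an error if the failpoint is not available on
// every member of the cluster.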
func validateFailpoint(clus *e2e.EtcdProcessCluster, failpoint Failpoint) error {
for _, proc := range clus.Procs {
if !failpoint.Available(*clus.Cfg, proc) {
return fmt.Errorf("failpoint %q not available on %s", failpoint.Name(), proc.Config().Name)
}
}
return nil
}

func injectFailpoints(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, failpoint Failpoint) {
ctx, cancel := context.WithTimeout(ctx, triggerTimeout)
defer cancel()

var err error
successes := 0
failures := 0
for _, proc := range clus.Procs {
if !failpoint.Available(*clus.Cfg, proc) {
t.Errorf("Failpoint %q not available on %s", failpoint.Name(), proc.Config().Name)
return
}
}
for successes < failpointInjectionsCount && failures < failpointInjectionsRetries {
time.Sleep(waitBetweenFailpointTriggers)

@@ -230,10 +248,16 @@ func (f goPanicFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error {
member := f.pickMember(t, clus)

for member.IsRunning() {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
lg.Info("Setting up gofailpoint", zap.String("failpoint", f.Name()))
err := member.Failpoints().Setup(ctx, f.failpoint, "panic")
if err != nil {
lg.Info("goFailpoint setup failed", zap.String("failpoint", f.Name()), zap.Error(err))
continue
Member:
Can you provide some history/context on why gofailpoint setup can fail? Perhaps a comment line somewhere here would help future readers of this code as well.

Member Author:

I don't know why :P. I haven't been able to reproduce the issue locally; it only happens on CI. I'm guessing some deadlock or zombie-process issue.

Member:

Dumped the log from https://github.com/etcd-io/etcd/actions/runs/5298895925/jobs/9591523910 for release-3.5 and checked the goroutines after the timeout. There were only two waitProcess stacks:

2023-06-17T18:53:04.8466750Z goroutine 680492 [syscall, 125 minutes]:
2023-06-17T18:53:04.8467149Z syscall.Syscall6(0x1000100000000?, 0x0?, 0x2?, 0xc000b80800?, 0x0?, 0x0?, 0xffffffffffffffff?)
2023-06-17T18:53:04.8467555Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/syscall/syscall_linux.go:90 +0x36
2023-06-17T18:53:04.8467898Z os.(*Process).blockUntilWaitable(0xc0004ea570)
2023-06-17T18:53:04.8468466Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/os/wait_waitid.go:32 +0x87
2023-06-17T18:53:04.8468749Z os.(*Process).wait(0xc0004ea570)
2023-06-17T18:53:04.8469186Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/os/exec_unix.go:22 +0x28
2023-06-17T18:53:04.8469465Z os.(*Process).Wait(...)
2023-06-17T18:53:04.8469777Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/os/exec.go:132
2023-06-17T18:53:04.8470136Z go.etcd.io/etcd/pkg/v3/expect.(*ExpectProcess).waitProcess(0xc0000e8600)
2023-06-17T18:53:04.8470523Z 	/home/runner/work/etcd/etcd/pkg/expect/expect.go:270 +0x5b
2023-06-17T18:53:04.8470883Z go.etcd.io/etcd/pkg/v3/expect.(*ExpectProcess).waitSaveExitErr(0xc0000e8600)
2023-06-17T18:53:04.8471271Z 	/home/runner/work/etcd/etcd/pkg/expect/expect.go:142 +0x59
2023-06-17T18:53:04.8471622Z created by go.etcd.io/etcd/pkg/v3/expect.NewExpectWithEnv

2023-06-17T18:53:04.8656733Z goroutine 680504 [syscall, 125 minutes]:
2023-06-17T18:53:04.8657117Z syscall.Syscall6(0xc00014ee40?, 0xc00014eddc?, 0xc00014ef90?, 0x1?, 0xc00014edd8?, 0xc00014ef01?, 0x2?)
2023-06-17T18:53:04.8657568Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/syscall/syscall_linux.go:90 +0x36
2023-06-17T18:53:04.8657917Z os.(*Process).blockUntilWaitable(0xc000136480)
2023-06-17T18:53:04.8658283Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/os/wait_waitid.go:32 +0x87
2023-06-17T18:53:04.8658574Z os.(*Process).wait(0xc000136480)
2023-06-17T18:53:04.8658907Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/os/exec_unix.go:22 +0x28
2023-06-17T18:53:04.8659188Z os.(*Process).Wait(...)
2023-06-17T18:53:04.8659497Z 	/opt/hostedtoolcache/go/1.19.10/x64/src/os/exec.go:132
2023-06-17T18:53:04.8659865Z go.etcd.io/etcd/pkg/v3/expect.(*ExpectProcess).waitProcess(0xc0005aa0c0)
2023-06-17T18:53:04.8660255Z 	/home/runner/work/etcd/etcd/pkg/expect/expect.go:270 +0x5b
2023-06-17T18:53:04.8660620Z go.etcd.io/etcd/pkg/v3/expect.(*ExpectProcess).waitSaveExitErr(0xc0005aa0c0)
2023-06-17T18:53:04.8661015Z 	/home/runner/work/etcd/etcd/pkg/expect/expect.go:142 +0x59
2023-06-17T18:53:04.8661376Z created by go.etcd.io/etcd/pkg/v3/expect.NewExpectWithEnv
2023-06-17T18:53:04.8661753Z 	/home/runner/work/etcd/etcd/pkg/expect/expect.go:79 +0x285

That's because one member is down:

2023-06-17T16:47:32.7945331Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): goroutine 211 [running]:
2023-06-17T16:47:32.7946881Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): go.etcd.io/gofail/runtime.actPanic(0x0?)
2023-06-17T16:47:32.7948074Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): 	go.etcd.io/gofail@v0.1.0/runtime/terms.go:318 +0x69
2023-06-17T16:47:32.7949038Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): go.etcd.io/gofail/runtime.(*term).do(...)
2023-06-17T16:47:32.7950001Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): 	go.etcd.io/gofail@v0.1.0/runtime/terms.go:290
2023-06-17T16:47:32.8040147Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): go.etcd.io/gofail/runtime.(*terms).eval(0xc00008ad80?)
2023-06-17T16:47:32.8041872Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-0) (30389): {"level":"warn","ts":"2023-06-17T16:47:32.794133Z","caller":"rafthttp/stream.go:421","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"eabdbb777cf498cb","remote-peer-id":"ac4ec652f10e5b49","error":"EOF"}
2023-06-17T16:47:32.8044223Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-0) (30389): {"level":"warn","ts":"2023-06-17T16:47:32.794683Z","caller":"rafthttp/stream.go:421","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"eabdbb777cf498cb","remote-peer-id":"ac4ec652f10e5b49","error":"EOF"}
2023-06-17T16:47:32.8045360Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): 	go.etcd.io/gofail@v0.1.0/runtime/terms.go:105 +0xeb
2023-06-17T16:47:32.8046231Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): go.etcd.io/gofail/runtime.(*Failpoint).Acquire(0xc00022f520?)
2023-06-17T16:47:32.8047198Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): 	go.etcd.io/gofail@v0.1.0/runtime/failpoint.go:38 +0xa5
2023-06-17T16:47:32.8048076Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): go.etcd.io/etcd/server/v3/etcdserver.(*raftNode).start.func1()
2023-06-17T16:47:32.8048958Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): 	go.etcd.io/etcd/server/v3/etcdserver/raft.go:238 +0x9b6
2023-06-17T16:47:32.8050297Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): created by go.etcd.io/etcd/server/v3/etcdserver.(*raftNode).start
2023-06-17T16:47:32.8051299Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-1) (30400): 	go.etcd.io/etcd/server/v3/etcdserver/raft.go:169 +0x7a
2023-06-17T16:47:32.8052733Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-2) (30390): {"level":"warn","ts":"2023-06-17T16:47:32.801145Z","caller":"rafthttp/stream.go:421","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"bf19ae4419db00dc","remote-peer-id":"ac4ec652f10e5b49","error":"EOF"}
2023-06-17T16:47:32.8054480Z /tmp/etcd-release-3.5-failpoints/bin/etcd (TestRobustnessHighTrafficClusterOfSize3-test-2) (30390): {"level":"warn","ts":"2023-06-17T16:47:32.801254Z","caller":"rafthttp/stream.go:421","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"bf19ae4419db00dc","remote-peer-id":"ac4ec652f10e5b49","error":"EOF"}
2023-06-17T16:47:32.8058055Z     logger.go:130: 2023-06-17T16:47:32.802Z	INFO	goFailpoint setup failed	{"failpoint": "raftBeforeLeaderSend", "error": "Put \"http://127.0.0.1:22381/raftBeforeLeaderSend\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
2023-06-17T16:47:32.8059887Z     logger.go:130: 2023-06-17T16:47:32.802Z	INFO	Setting up gofailpoint	{"failpoint": "raftBeforeLeaderSend"}
2023-06-17T16:47:32.8062012Z     logger.go:130: 2023-06-17T16:47:32.802Z	INFO	goFailpoint setup failed	{"failpoint": "raftBeforeLeaderSend", "error": "Put \"http://127.0.0.1:22381/raftBeforeLeaderSend\": dial tcp 127.0.0.1:22381: connect: connection refused"}
2023-06-17T16:47:32.8063463Z     logger.go:130: 2023-06-17T16:47:32.802Z	INFO	Setting up gofailpoint	{"failpoint": "raftBeforeLeaderSend"}
2023-06-17T16:47:32.8065735Z     logger.go:130: 2023-06-17T16:47:32.803Z	INFO	goFailpoint setup failed	{"failpoint": "raftBeforeLeaderSend", "error": "Put \"http://127.0.0.1:22381/raftBeforeLeaderSend\": dial tcp 127.0.0.1:22381: connect: connection refused"}

I think we should enhance waitProcess:

state, err := ep.cmd.Process.Wait()

If waitProcess returns an unexpected syscall error, we should also update the exitCode.

I downloaded the code and checked it. It's weird: we set the timeout to 1 minute, but it still ran for two hours, even though we have a select on <-ctx.Done().
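
A minimal sketch of what that enhancement could look like. The struct fields (mu, exitCode, exitErr), the -1 sentinel, and the waitWithContext wrapper are assumptions for illustration, not the actual pkg/expect code:

package expect

import (
	"context"
	"fmt"
	"os/exec"
	"sync"
)

// ExpectProcess mirrors only the fields this sketch needs; the real
// pkg/expect type carries more state.
type ExpectProcess struct {
	mu       sync.Mutex
	cmd      *exec.Cmd
	exitCode int
	exitErr  error
}

// waitProcess records the outcome even when Process.Wait itself fails with
// a syscall error, so readers of exitCode/exitErr are not left blocked on
// state that never arrives.
func (ep *ExpectProcess) waitProcess() error {
	state, err := ep.cmd.Process.Wait()
	ep.mu.Lock()
	defer ep.mu.Unlock()
	if err != nil {
		ep.exitErr = err
		ep.exitCode = -1 // sentinel: the wait itself failed
		return err
	}
	ep.exitCode = state.ExitCode()
	if !state.Success() {
		ep.exitErr = fmt.Errorf("unexpected exit code %d", ep.exitCode)
	}
	return ep.exitErr
}

// waitWithContext wraps waitProcess so a caller's deadline is honored:
// os.Process.Wait blocks in an uninterruptible syscall, so a select on
// <-ctx.Done() elsewhere cannot unblock it on its own.
func (ep *ExpectProcess) waitWithContext(ctx context.Context) error {
	done := make(chan error, 1)
	go func() { done <- ep.waitProcess() }()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}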

Member:

> why gofailpoint setup can fail

I think if the member is panicking, the server can't return the error in time, e.g.:

2023-06-17T16:47:32.8058055Z     logger.go:130: 2023-06-17T16:47:32.802Z	INFO	goFailpoint setup failed	{"failpoint": "raftBeforeLeaderSend", "error": "Put \"http://127.0.0.1:22381/raftBeforeLeaderSend\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
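
For context on the failing call: Failpoints().Setup amounts to an HTTP PUT against the member's gofail endpoint (the http://127.0.0.1:22381/raftBeforeLeaderSend URL in the log). A rough sketch of that call; the helper name and the accepted status codes are assumptions, not the test's actual code:

package main

import (
	"context"
	"fmt"
	"net/http"
	"strings"
)

// setupFailpoint activates a gofail failpoint by PUTting a term such as
// "panic" to the member's gofail HTTP endpoint, e.g.
// setupFailpoint(ctx, "127.0.0.1:22381", "raftBeforeLeaderSend", "panic").
// If the member is mid-panic the PUT hangs until the client or context
// deadline fires ("context deadline exceeded"); once the process has
// exited it fails fast ("connection refused") - the two errors in the CI
// log above.
func setupFailpoint(ctx context.Context, endpoint, name, term string) error {
	url := fmt.Sprintf("http://%s/%s", endpoint, name)
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, strings.NewReader(term))
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("setting failpoint %q: unexpected status %s", name, resp.Status)
	}
	return nil
}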

Member Author:

Can you look at this, @fuweid?

Member (@fuweid), Jun 19, 2023:

Yes. I reproduced it locally, but the log was flushed in tmux and got truncated 😢. Reproducing again.

Member:

Found the issue in the expect package and reproduced it locally. Will file a PR later (OOO currently).

Member Author:

Can you share what problem you found? It's causing problems for work on the robustness tests.

Member Author:

Filed #16134
Can someone look into that?

Member:

Sorry for the late reply. Filed pull request #16137 to handle this.

}
if !member.IsRunning() {
// TODO: Check member logs to confirm that etcd stopped because of a panic triggered by the expected gofail failpoint.
@@ -255,11 +279,7 @@
lg.Info("Member exited as expected", zap.String("member", member.Config().Name))
}

err := member.Start(ctx)
if err != nil {
return err
}
return nil
return member.Start(ctx)
}

func (f goPanicFailpoint) pickMember(t *testing.T, clus *e2e.EtcdProcessCluster) e2e.EtcdProcess {
@@ -347,40 +367,6 @@ func (t triggerCompact) Available(e2e.EtcdProcessClusterConfig, e2e.EtcdProcess) bool {
return true
}

type randomFailpoint struct {
failpoints []Failpoint
}

func (f randomFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error {
availableFailpoints := make([]Failpoint, 0, len(f.failpoints))
for _, failpoint := range f.failpoints {
count := 0
for _, proc := range clus.Procs {
if failpoint.Available(*clus.Cfg, proc) {
count++
}
}
if count == len(clus.Procs) {
availableFailpoints = append(availableFailpoints, failpoint)
}
}
if len(availableFailpoints) == 0 {
t.Errorf("No available failpoints")
return nil
}
failpoint := availableFailpoints[rand.Int()%len(availableFailpoints)]
lg.Info("Triggering failpoint\n", zap.String("failpoint", failpoint.Name()))
return failpoint.Inject(ctx, t, lg, clus)
}

func (f randomFailpoint) Name() string {
return "Random"
}

func (f randomFailpoint) Available(e2e.EtcdProcessClusterConfig, e2e.EtcdProcess) bool {
return true
}

type blackholePeerNetworkFailpoint struct {
triggerBlackhole
}
24 changes: 15 additions & 9 deletions tests/robustness/linearizability_test.go
@@ -39,9 +39,8 @@ func TestRobustness(t *testing.T) {
scenarios := []testScenario{}
for _, traffic := range []traffic.Config{traffic.LowTraffic, traffic.HighTraffic, traffic.KubernetesTraffic} {
scenarios = append(scenarios, testScenario{
name: traffic.Name + "ClusterOfSize1",
failpoint: RandomFailpoint,
traffic: traffic,
name: traffic.Name + "ClusterOfSize1",
traffic: traffic,
cluster: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithSnapshotCount(100),
@@ -62,10 +61,9 @@
clusterOfSize3Options = append(clusterOfSize3Options, e2e.WithSnapshotCatchUpEntries(100))
}
scenarios = append(scenarios, testScenario{
name: traffic.Name + "ClusterOfSize3",
failpoint: RandomFailpoint,
traffic: traffic,
cluster: *e2e.NewConfig(clusterOfSize3Options...),
name: traffic.Name + "ClusterOfSize3",
traffic: traffic,
cluster: *e2e.NewConfig(clusterOfSize3Options...),
})
}
scenarios = append(scenarios, testScenario{
@@ -93,8 +91,7 @@ func TestRobustness(t *testing.T) {
),
})
scenarios = append(scenarios, testScenario{
name: "Issue15220",
failpoint: RandomFailpoint,
name: "Issue15220",
watch: watchConfig{
requestProgress: true,
},
@@ -145,6 +142,15 @@ func testRobustness(ctx context.Context, t *testing.T, lg *zap.Logger, s testScenario) {
}
defer r.clus.Close()

if s.failpoint == nil {
s.failpoint = pickRandomFailpoint(t, r.clus)
} else {
err = validateFailpoint(r.clus, s.failpoint)
if err != nil {
t.Fatal(err)
}
}

// t.Failed() returns false during panicking. We need to forcibly
// save data on panicking.
// Refer to: https://github.com/golang/go/issues/49929