diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go
index 4aac7e2c8248..064a0561144c 100644
--- a/tests/integration/cluster_test.go
+++ b/tests/integration/cluster_test.go
@@ -208,9 +208,8 @@ func TestIssue2681(t *testing.T) {
 	c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, DisableStrictReconfigCheck: true})
 	defer c.Terminate(t)
 
-	if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberID())); err != nil {
-		t.Fatal(err)
-	}
+	err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberID()))
+	require.NoError(t, err)
 	c.WaitMembersForLeader(t, c.Members)
 
 	c.AddMember(t)
@@ -234,9 +233,8 @@ func testIssue2746(t *testing.T, members int) {
 		clusterMustProgress(t, c.Members)
 	}
 
-	if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberID())); err != nil {
-		t.Fatal(err)
-	}
+	err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberID()))
+	require.NoError(t, err)
 	c.WaitMembersForLeader(t, c.Members)
 
 	c.AddMember(t)
@@ -312,9 +310,8 @@ func TestIssue3699(t *testing.T) {
 	t.Logf("Restarting member '0'...")
 	// bring back node a
 	// node a will remain useless as long as d is the leader.
-	if err := c.Members[0].Restart(t); err != nil {
-		t.Fatal(err)
-	}
+	err := c.Members[0].Restart(t)
+	require.NoError(t, err)
 	t.Logf("Restarted member '0'.")
 
 	select {
@@ -530,9 +527,7 @@ func TestConcurrentRemoveMember(t *testing.T) {
 	defer c.Terminate(t)
 
 	addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	removeID := addResp.Member.ID
 	done := make(chan struct{})
 	go func() {
@@ -540,9 +535,8 @@ func TestConcurrentRemoveMember(t *testing.T) {
 		c.Members[0].Client.MemberRemove(context.Background(), removeID)
 		close(done)
 	}()
-	if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil {
-		t.Fatal(err)
-	}
+	_, err = c.Members[0].Client.MemberRemove(context.Background(), removeID)
+	require.NoError(t, err)
 	<-done
 }
 
@@ -552,9 +546,7 @@ func TestConcurrentMoveLeader(t *testing.T) {
 	defer c.Terminate(t)
 
 	addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	removeID := addResp.Member.ID
 	done := make(chan struct{})
 	go func() {
@@ -562,8 +554,7 @@ func TestConcurrentMoveLeader(t *testing.T) {
 		c.Members[0].Client.MoveLeader(context.Background(), removeID)
 		close(done)
 	}()
-	if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil {
-		t.Fatal(err)
-	}
+	_, err = c.Members[0].Client.MemberRemove(context.Background(), removeID)
+	require.NoError(t, err)
 	<-done
 }
diff --git a/tests/integration/grpc_test.go b/tests/integration/grpc_test.go
index 7061ed61e63d..e3b762c31efc 100644
--- a/tests/integration/grpc_test.go
+++ b/tests/integration/grpc_test.go
@@ -22,6 +22,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc"
 
 	clientv3 "go.etcd.io/etcd/client/v3"
@@ -105,9 +106,7 @@ func TestAuthority(t *testing.T) {
 			putRequestMethod := "/etcdserverpb.KV/Put"
 			for i := 0; i < 100; i++ {
 				_, err := kv.Put(context.TODO(), "foo", "bar")
-				if err != nil {
-					t.Fatal(err)
-				}
+				require.NoError(t, err)
 			}
 
 			assertAuthority(t, tc.expectAuthorityPattern, clus, putRequestMethod)
@@ -121,9 +120,7 @@ func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integra
 	if useTLS {
 		cfg.ClientTLS = &integration.TestTLSInfo
 		tlsConfig, err := integration.TestTLSInfo.ClientConfig()
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		return cfg, tlsConfig
 	}
 	return cfg, nil
@@ -138,9 +135,7 @@ func setupClient(t *testing.T, endpointPattern string, clus *integration.Cluster
 		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 		TLS:         tlsConfig,
 	})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return kv
 }
diff --git a/tests/integration/hashkv_test.go b/tests/integration/hashkv_test.go
index cbc83d0159eb..26d5dfaf0409 100644
--- a/tests/integration/hashkv_test.go
+++ b/tests/integration/hashkv_test.go
@@ -21,6 +21,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
+
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/etcdserver"
 	"go.etcd.io/etcd/server/v3/storage/mvcc/testutil"
@@ -36,9 +38,7 @@ func TestCompactionHash(t *testing.T) {
 	defer clus.Terminate(t)
 
 	cc, err := clus.ClusterClient(t)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	client := &http.Client{
 		Transport: &http.Transport{
 			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
diff --git a/tests/integration/member_test.go b/tests/integration/member_test.go
index efd6598f684d..b012370ab2c4 100644
--- a/tests/integration/member_test.go
+++ b/tests/integration/member_test.go
@@ -58,9 +58,7 @@ func TestRestartMember(t *testing.T) {
 		c.WaitMembersForLeader(t, membs)
 		clusterMustProgress(t, membs)
 		err := c.Members[i].Restart(t)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 	}
 	c.WaitMembersForLeader(t, c.Members)
 	clusterMustProgress(t, c.Members)
diff --git a/tests/integration/metrics_test.go b/tests/integration/metrics_test.go
index e6f5a3f3e9c0..652856ebb4fc 100644
--- a/tests/integration/metrics_test.go
+++ b/tests/integration/metrics_test.go
@@ -38,9 +38,7 @@ func TestMetricDbSizeBoot(t *testing.T) {
 	defer clus.Terminate(t)
 
 	v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	if v == "0" {
 		t.Fatalf("expected non-zero, got %q", v)
@@ -74,40 +72,29 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
 	expected := numPuts * len(putreq.Value)
 	beforeDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	bv, err := strconv.Atoi(beforeDefrag)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if bv < expected {
 		t.Fatalf("expected db size greater than %d, got %d", expected, bv)
 	}
 	beforeDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	biu, err := strconv.Atoi(beforeDefragInUse)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if biu < expected {
 		t.Fatalf("expected db size in use is greater than %d, got %d", expected, biu)
 	}
 
 	// clear out historical keys, in use bytes should free pages
 	creq := &pb.CompactionRequest{Revision: int64(numPuts), Physical: true}
-	if _, kerr := kvc.Compact(context.TODO(), creq); kerr != nil {
-		t.Fatal(kerr)
-	}
+	_, kerr := kvc.Compact(context.TODO(), creq)
+	require.NoError(t, kerr)
 
 	validateAfterCompactionInUse := func() error {
 		// Put to move PendingPages to FreePages
 		_, verr := kvc.Put(context.TODO(), putreq)
-		if verr != nil {
-			t.Fatal(verr)
-		}
+		require.NoError(t, verr)
 		time.Sleep(500 * time.Millisecond)
 
 		afterCompactionInUse, verr := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
@@ -142,25 +129,17 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
 	mc.Defragment(context.TODO(), &pb.DefragmentRequest{})
 
 	afterDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	av, err := strconv.Atoi(afterDefrag)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if bv <= av {
 		t.Fatalf("expected less than %d, got %d after defrag", bv, av)
 	}
 	afterDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	adiu, err := strconv.Atoi(afterDefragInUse)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if adiu > av {
 		t.Fatalf("db size in use (%d) is expected less than db size (%d) after defrag", adiu, av)
 	}
@@ -172,13 +151,9 @@ func TestMetricQuotaBackendBytes(t *testing.T) {
 	defer clus.Terminate(t)
 
 	qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	qv, err := strconv.ParseFloat(qs, 64)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if int64(qv) != storage.DefaultQuotaBytes {
 		t.Fatalf("expected %d, got %f", storage.DefaultQuotaBytes, qv)
 	}
@@ -190,9 +165,7 @@ func TestMetricsHealth(t *testing.T) {
 	defer clus.Terminate(t)
 
 	tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	u := clus.Members[0].ClientURLs[0]
 	u.Path = "/health"
 	resp, err := tr.RoundTrip(&http.Request{
@@ -201,14 +174,10 @@ func TestMetricsHealth(t *testing.T) {
 		URL:    &u,
 	})
 	resp.Body.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	hv, err := clus.Members[0].Metric("etcd_server_health_failures")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if hv != "0" {
 		t.Fatalf("expected '0' from etcd_server_health_failures, got %q", hv)
 	}
diff --git a/tests/integration/revision_test.go b/tests/integration/revision_test.go
index e5c1d270fac0..fd53cfb37b41 100644
--- a/tests/integration/revision_test.go
+++ b/tests/integration/revision_test.go
@@ -23,6 +23,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc/status"
 
 	"go.etcd.io/etcd/tests/v3/framework/integration"
@@ -95,7 +97,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration,
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			getWorker(ctx, t, clus)
+			getWorker(ctx, t, clus) //nolint:testifylint
 		}()
 	}
 
@@ -103,9 +105,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration,
 	wg.Wait()
 	kv := clus.Client(0)
 	resp, err := kv.Get(context.Background(), "foo")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Logf("Revision %d", resp.Header.Revision)
 }
 
@@ -116,9 +116,7 @@ func putWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) {
 		if errors.Is(err, context.DeadlineExceeded) {
 			return
 		}
-		if silenceConnectionErrors(err) != nil {
-			t.Fatal(err)
-		}
+		assert.NoError(t, silenceConnectionErrors(err))
 	}
 }
 
@@ -130,9 +128,7 @@ func getWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) {
 		if errors.Is(err, context.DeadlineExceeded) {
 			return
 		}
-		if silenceConnectionErrors(err) != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, silenceConnectionErrors(err))
 		if resp == nil {
 			continue
 		}
diff --git a/tests/integration/tracing_test.go b/tests/integration/tracing_test.go
index 5b87208848a9..d255e958308d 100644
--- a/tests/integration/tracing_test.go
+++ b/tests/integration/tracing_test.go
@@ -40,9 +40,7 @@ func TestTracing(t *testing.T) {
 		"Wal creation tests are depending on embedded etcd server so are integration-level tests.")
 	// set up trace collector
 	listener, err := net.Listen("tcp", "localhost:")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	traceFound := make(chan struct{})
 	defer close(traceFound)
@@ -64,9 +62,7 @@ func TestTracing(t *testing.T) {
 
 	// start an etcd instance with tracing enabled
 	etcdSrv, err := embed.StartEtcd(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer etcdSrv.Close()
 
 	select {
diff --git a/tests/integration/utl_wal_version_test.go b/tests/integration/utl_wal_version_test.go
index 33e1b0aecd49..d3646735d653 100644
--- a/tests/integration/utl_wal_version_test.go
+++ b/tests/integration/utl_wal_version_test.go
@@ -21,6 +21,7 @@ import (
 
 	"github.com/coreos/go-semver/semver"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 
 	"go.etcd.io/etcd/client/pkg/v3/testutil"
@@ -37,9 +38,7 @@ func TestEtcdVersionFromWAL(t *testing.T) {
 		"Wal creation tests are depending on embedded etcd server so are integration-level tests.")
 	cfg := integration.NewEmbedConfig(t, "default")
 	srv, err := embed.StartEtcd(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	select {
 	case <-srv.Server.ReadyNotify():
 	case <-time.After(3 * time.Second):
@@ -76,15 +75,11 @@ func TestEtcdVersionFromWAL(t *testing.T) {
 	srv.Close()
 
 	w, err := wal.Open(zap.NewNop(), cfg.Dir+"/member/wal", walpb.Snapshot{})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer w.Close()
 
 	walVersion, err := wal.ReadWALVersion(w)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	assert.Equal(t, &semver.Version{Major: 3, Minor: 6}, walVersion.MinimalEtcdVersion())
 }
diff --git a/tests/integration/v3_alarm_test.go b/tests/integration/v3_alarm_test.go
index 50a701e68095..4b1218caca79 100644
--- a/tests/integration/v3_alarm_test.go
+++ b/tests/integration/v3_alarm_test.go
@@ -22,6 +22,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
 	"go.uber.org/zap/zaptest"
 
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
@@ -63,15 +64,11 @@ func TestV3StorageQuotaApply(t *testing.T) {
 	// test big put
 	bigbuf := make([]byte, quotasize)
 	_, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// quorum get should work regardless of whether alarm is raised
 	_, err = kvc0.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// wait until alarm is raised for sure-- poll the alarms
 	stopc := time.After(5 * time.Second)
@@ -111,9 +108,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
 			},
 		},
 	})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout)
 	defer cancel()
@@ -152,25 +147,22 @@ func TestV3AlarmDeactivate(t *testing.T) {
 		Action: pb.AlarmRequest_ACTIVATE,
 		Alarm:  pb.AlarmType_NOSPACE,
 	}
-	if _, err := mt.Alarm(context.TODO(), alarmReq); err != nil {
-		t.Fatal(err)
-	}
+	_, err := mt.Alarm(context.TODO(), alarmReq)
+	require.NoError(t, err)
 
 	key := []byte("abc")
 	smallbuf := make([]byte, 512)
-	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})
+	_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})
 	if err == nil && !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) {
 		t.Fatalf("put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
 	}
 
 	alarmReq.Action = pb.AlarmRequest_DEACTIVATE
-	if _, err = mt.Alarm(context.TODO(), alarmReq); err != nil {
-		t.Fatal(err)
-	}
+	_, err = mt.Alarm(context.TODO(), alarmReq)
+	require.NoError(t, err)
 
-	if _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil {
-		t.Fatal(err)
-	}
+	_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})
+	require.NoError(t, err)
 }
 
 func TestV3CorruptAlarm(t *testing.T) {
@@ -209,15 +201,12 @@ func TestV3CorruptAlarm(t *testing.T) {
 	time.Sleep(time.Second * 2)
 
 	// Wait for cluster so Puts succeed in case member 0 was the leader.
-	if _, err := clus.Client(1).Get(context.TODO(), "k"); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := clus.Client(1).Put(context.TODO(), "xyz", "321"); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := clus.Client(1).Put(context.TODO(), "abc", "fed"); err != nil {
-		t.Fatal(err)
-	}
+	_, err := clus.Client(1).Get(context.TODO(), "k")
+	require.NoError(t, err)
+	_, err = clus.Client(1).Put(context.TODO(), "xyz", "321")
+	require.NoError(t, err)
+	_, err = clus.Client(1).Put(context.TODO(), "abc", "fed")
+	require.NoError(t, err)
 
 	// Restart with corruption checking enabled.
 	clus.Members[1].Stop(t)
@@ -231,14 +220,10 @@ func TestV3CorruptAlarm(t *testing.T) {
 	clus.Members[0].WaitStarted(t)
 	resp0, err0 := clus.Client(0).Get(context.TODO(), "abc")
-	if err0 != nil {
-		t.Fatal(err0)
-	}
+	require.NoError(t, err0)
 	clus.Members[1].WaitStarted(t)
 	resp1, err1 := clus.Client(1).Get(context.TODO(), "abc")
-	if err1 != nil {
-		t.Fatal(err1)
-	}
+	require.NoError(t, err1)
 
 	if resp0.Kvs[0].ModRevision == resp1.Kvs[0].ModRevision {
 		t.Fatalf("matching ModRevision values")
@@ -290,9 +275,7 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) {
 		}
 	}
 
-	if err = clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())))
 	clus.WaitMembersForLeader(t, clus.Members)
 
 	clus.AddMember(t)
@@ -314,30 +297,20 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) {
 	schema.MustUnsafePutLease(tx, &lpb)
 	tx.Commit()
 
-	if err = be.Close(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, be.Close())
 
-	if err = clus.Members[2].Restart(t); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, clus.Members[2].Restart(t))
 
 	clus.Members[1].WaitOK(t)
 	clus.Members[2].WaitOK(t)
 
 	// Revoke lease should remove key except the member with corruption
 	_, err = integration.ToGRPC(clus.Members[0].Client).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	resp0, err0 := clus.Members[1].Client.KV.Get(context.TODO(), "foo")
-	if err0 != nil {
-		t.Fatal(err0)
-	}
+	require.NoError(t, err0)
 	resp1, err1 := clus.Members[2].Client.KV.Get(context.TODO(), "foo")
-	if err1 != nil {
-		t.Fatal(err1)
-	}
+	require.NoError(t, err1)
 
 	if resp0.Header.Revision == resp1.Header.Revision {
 		t.Fatalf("matching Revision values")
diff --git a/tests/integration/v3_auth_test.go b/tests/integration/v3_auth_test.go
index d62ea7a03f18..f19325049a63 100644
--- a/tests/integration/v3_auth_test.go
+++ b/tests/integration/v3_auth_test.go
@@ -101,9 +101,8 @@ func TestV3AuthTokenWithDisable(t *testing.T) {
 	}()
 
 	time.Sleep(10 * time.Millisecond)
-	if _, err := c.AuthDisable(context.TODO()); err != nil {
-		t.Fatal(err)
-	}
+	_, err := c.AuthDisable(context.TODO())
+	require.NoError(t, err)
 	time.Sleep(10 * time.Millisecond)
 
 	cancel()
@@ -168,14 +167,11 @@ func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterCon
 	defer rootc.Close()
 
 	leaseResp, err := rootc.Grant(context.TODO(), 2)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	leaseID := leaseResp.ID
 
-	if _, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID)); err != nil {
-		t.Fatal(err)
-	}
+	_, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID))
+	require.NoError(t, err)
 
 	// wait for lease expire
 	time.Sleep(3 * time.Second)
@@ -229,15 +225,11 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
 	defer rootc.Close()
 
 	leaseResp, err := rootc.Grant(context.TODO(), 90)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	leaseID := leaseResp.ID
 	// permission of k3 isn't granted to user1
 	_, err = rootc.Put(context.TODO(), "k3", "val", clientv3.WithLease(leaseID))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
 	if cerr != nil {
@@ -288,31 +280,21 @@ func TestV3AuthWithLeaseAttach(t *testing.T) {
 	defer user2c.Close()
 
 	leaseResp, err := user1c.Grant(context.TODO(), 90)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	leaseID := leaseResp.ID
 	// permission of k2 is also granted to user2
 	_, err = user1c.Put(context.TODO(), "k2", "val", clientv3.WithLease(leaseID))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	_, err = user2c.Revoke(context.TODO(), leaseID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	leaseResp, err = user1c.Grant(context.TODO(), 90)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	leaseID = leaseResp.ID
 	// permission of k1 isn't granted to user2
 	_, err = user1c.Put(context.TODO(), "k1", "val", clientv3.WithLease(leaseID))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	_, err = user2c.Revoke(context.TODO(), leaseID)
 	if err == nil {
@@ -353,9 +335,8 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) {
 		},
 	}
 	authSetupUsers(t, auth, root)
-	if _, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{}); err != nil {
-		t.Fatal(err)
-	}
+	_, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{})
+	require.NoError(t, err)
 }
 
 func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go
index 350bfb354dd2..b49097ca8610 100644
--- a/tests/integration/v3_election_test.go
+++ b/tests/integration/v3_election_test.go
@@ -155,9 +155,8 @@ func TestElectionFailover(t *testing.T) {
 	}()
 
 	// invoke leader failover
-	if err := ss[0].Close(); err != nil {
-		t.Fatal(err)
-	}
+	err := ss[0].Close()
+	require.NoError(t, err)
 
 	// check new leader
 	e = concurrency.NewElection(ss[2], "test-election")
@@ -192,13 +191,11 @@ func TestElectionSessionRecampaign(t *testing.T) {
 	defer session.Orphan()
 
 	e := concurrency.NewElection(session, "test-elect")
-	if err := e.Campaign(context.TODO(), "abc"); err != nil {
-		t.Fatal(err)
-	}
+	err = e.Campaign(context.TODO(), "abc")
+	require.NoError(t, err)
 	e2 := concurrency.NewElection(session, "test-elect")
-	if err := e2.Campaign(context.TODO(), "def"); err != nil {
-		t.Fatal(err)
-	}
+	err = e2.Campaign(context.TODO(), "def")
+	require.NoError(t, err)
 
 	ctx, cancel := context.WithCancel(context.TODO())
 	defer cancel()
@@ -217,22 +214,19 @@ func TestElectionOnPrefixOfExistingKey(t *testing.T) {
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
-	if _, err := cli.Put(context.TODO(), "testa", "value"); err != nil {
-		t.Fatal(err)
-	}
+	_, err := cli.Put(context.TODO(), "testa", "value")
+	require.NoError(t, err)
 	s, serr := concurrency.NewSession(cli)
 	if serr != nil {
 		t.Fatal(serr)
 	}
 
 	e := concurrency.NewElection(s, "test")
 	ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
-	err := e.Campaign(ctx, "abc")
+	err = e.Campaign(ctx, "abc")
 	cancel()
-	if err != nil {
-		// after 5 seconds, deadlock results in
-		// 'context deadline exceeded' here.
-		t.Fatal(err)
-	}
+	// after 5 seconds, deadlock results in
+	// 'context deadline exceeded' here.
+	require.NoError(t, err)
 }
 
@@ -245,9 +239,7 @@ func TestElectionOnSessionRestart(t *testing.T) {
 	cli := clus.RandClient()
 
 	session, err := concurrency.NewSession(cli)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	e := concurrency.NewElection(session, "test-elect")
 	if cerr := e.Campaign(context.TODO(), "abc"); cerr != nil {
@@ -293,9 +285,7 @@ func TestElectionObserveCompacted(t *testing.T) {
 	cli := clus.Client(0)
 
 	session, err := concurrency.NewSession(cli)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer session.Orphan()
 
 	e := concurrency.NewElection(session, "test-elect")
diff --git a/tests/integration/v3_failover_test.go b/tests/integration/v3_failover_test.go
index b4ce09e1a38a..c3cce80fb2a5 100644
--- a/tests/integration/v3_failover_test.go
+++ b/tests/integration/v3_failover_test.go
@@ -22,6 +22,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc"
 
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
@@ -56,9 +57,7 @@ func TestFailover(t *testing.T) {
 			defer clus.Terminate(t)
 
 			cc, err := integration2.TestTLSInfo.ClientConfig()
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, err)
 			// Create an etcd client before or after first server down
 			t.Logf("Creating an etcd client [%s]", tc.name)
 			cli, err := tc.testFunc(t, cc, clus)
diff --git a/tests/integration/v3_grpc_inflight_test.go b/tests/integration/v3_grpc_inflight_test.go
index 7968e614edc7..3d8b446e59bc 100644
--- a/tests/integration/v3_grpc_inflight_test.go
+++ b/tests/integration/v3_grpc_inflight_test.go
@@ -20,6 +20,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -38,9 +39,8 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
 	cli := clus.RandClient()
 	kvc := integration.ToGRPC(cli).KV
-	if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
-		t.Fatal(err)
-	}
+	_, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
+	require.NoError(t, err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
@@ -69,9 +69,8 @@ func TestV3KVInflightRangeRequests(t *testing.T) {
 	cli := clus.RandClient()
 	kvc := integration.ToGRPC(cli).KV
 
-	if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
-		t.Fatal(err)
-	}
+	_, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
+	require.NoError(t, err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
diff --git a/tests/integration/v3_grpc_test.go b/tests/integration/v3_grpc_test.go
index 999d28ba8d9a..8396ffa18e6c 100644
--- a/tests/integration/v3_grpc_test.go
+++ b/tests/integration/v3_grpc_test.go
@@ -135,11 +135,10 @@ func TestV3CompactCurrentRev(t *testing.T) {
 		}
 	}
 	// get key to add to proxy cache, if any
-	if _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
-		t.Fatal(err)
-	}
+	_, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
+	require.NoError(t, err)
 	// compact on current revision
-	_, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4})
+	_, err = kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4})
 	if err != nil {
 		t.Fatalf("couldn't compact kv space (%v)", err)
 	}
@@ -166,15 +165,11 @@ func TestV3HashKV(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))})
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 
 		rev := resp.Header.Revision
 		hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0})
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		if rev != hresp.Header.Revision {
 			t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision)
 		}
@@ -183,9 +178,7 @@ func TestV3HashKV(t *testing.T) {
 		prevCompactRev := hresp.CompactRevision
 		for i := 0; i < 10; i++ {
 			hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0})
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, err)
 			if rev != hresp.Header.Revision {
 				t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision)
 			}
@@ -416,16 +409,12 @@ func TestV3TxnRevision(t *testing.T) {
 	kvc := integration.ToGRPC(clus.RandClient()).KV
 	pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}
 	presp, err := kvc.Put(context.TODO(), pr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: &pb.RangeRequest{Key: []byte("abc")}}}
 	txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}}
 	tresp, err := kvc.Txn(context.TODO(), txn)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// did not update revision
 	if presp.Header.Revision != tresp.Header.Revision {
@@ -435,9 +424,7 @@ func TestV3TxnRevision(t *testing.T) {
 	txndr := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: &pb.DeleteRangeRequest{Key: []byte("def")}}}
 	txn = &pb.TxnRequest{Success: []*pb.RequestOp{txndr}}
 	tresp, err = kvc.Txn(context.TODO(), txn)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// did not update revision
 	if presp.Header.Revision != tresp.Header.Revision {
@@ -447,9 +434,7 @@ func TestV3TxnRevision(t *testing.T) {
 	txnput := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("123")}}}
 	txn = &pb.TxnRequest{Success: []*pb.RequestOp{txnput}}
 	tresp, err = kvc.Txn(context.TODO(), txn)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// updated revision
 	if tresp.Header.Revision != presp.Header.Revision+1 {
@@ -496,9 +481,7 @@ func TestV3TxnCmpHeaderRev(t *testing.T) {
 		txn.Compare = append(txn.Compare, cmp)
 
 		tresp, err := kvc.Txn(context.TODO(), txn)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 
 		prev := <-revc
 		err = <-errCh
@@ -615,9 +598,7 @@ func TestV3TxnRangeCompare(t *testing.T) {
 		txn := &pb.TxnRequest{}
 		txn.Compare = append(txn.Compare, &tt.cmp)
 		tresp, err := kvc.Txn(context.TODO(), txn)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		if tt.wSuccess != tresp.Succeeded {
 			t.Errorf("#%d: expected %v, got %v", i, tt.wSuccess, tresp.Succeeded)
 		}
@@ -664,9 +645,7 @@ func TestV3TxnNestedPath(t *testing.T) {
 	}
 
 	tresp, err := kvc.Txn(context.TODO(), topTxn)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	curTxnResp := tresp
 	for i := range txnPath {
@@ -691,9 +670,7 @@ func TestV3PutIgnoreValue(t *testing.T) {
 	// create lease
 	lc := integration.ToGRPC(clus.RandClient()).Lease
 	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
@@ -824,9 +801,7 @@ func TestV3PutIgnoreLease(t *testing.T) {
 	// create lease
 	lc := integration.ToGRPC(clus.RandClient()).Lease
 	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
@@ -1294,13 +1269,12 @@ func TestV3StorageQuotaAPI(t *testing.T) {
 
 	// test small put that fits in quota
 	smallbuf := make([]byte, 512)
-	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil {
-		t.Fatal(err)
-	}
+	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})
+	require.NoError(t, err)
 
 	// test big put
 	bigbuf := make([]byte, quotasize)
-	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
+	_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
 	if !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) {
 		t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
 	}
diff --git a/tests/integration/v3_kv_test.go b/tests/integration/v3_kv_test.go
index eacb821af60c..1a5e7ce5ea2d 100644
--- a/tests/integration/v3_kv_test.go
+++ b/tests/integration/v3_kv_test.go
@@ -18,6 +18,8 @@ import (
 	"context"
 	"testing"
 
+	"github.com/stretchr/testify/require"
+
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/namespace"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
@@ -33,23 +35,15 @@ func TestKVWithEmptyValue(t *testing.T) {
 	client := clus.RandClient()
 
 	_, err := client.Put(context.Background(), "my-namespace/foobar", "data")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	_, err = client.Put(context.Background(), "my-namespace/foobar1", "data")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	_, err = client.Put(context.Background(), "namespace/foobar1", "data")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Range over all keys.
 	resp, err := client.Get(context.Background(), "", clientv3.WithFromKey())
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	for _, kv := range resp.Kvs {
 		t.Log(string(kv.Key), "=", string(kv.Value))
 	}
@@ -57,24 +51,18 @@ func TestKVWithEmptyValue(t *testing.T) {
 	// Range over all keys in a namespace.
 	client.KV = namespace.NewKV(client.KV, "my-namespace/")
 	resp, err = client.Get(context.Background(), "", clientv3.WithFromKey())
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	for _, kv := range resp.Kvs {
 		t.Log(string(kv.Key), "=", string(kv.Value))
 	}
 
 	// Remove all keys without WithFromKey/WithPrefix func
 	_, err = client.Delete(context.Background(), "")
-	if err == nil {
-		// fatal error duo to without WithFromKey/WithPrefix func called.
-		t.Fatal(err)
-	}
+	// an error is expected, since Delete was called without WithFromKey/WithPrefix.
+	require.Error(t, err)
 
 	respDel, err := client.Delete(context.Background(), "", clientv3.WithFromKey())
-	if err != nil {
-		// fatal error duo to with WithFromKey/WithPrefix func called.
-		t.Fatal(err)
-	}
+	// no error is expected, since Delete was called with WithFromKey.
+	require.NoError(t, err)
 	t.Logf("delete keys:%d", respDel.Deleted)
 }
diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go
index 2054acbade60..ea2b730c6189 100644
--- a/tests/integration/v3_leadership_test.go
+++ b/tests/integration/v3_leadership_test.go
@@ -21,6 +21,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
 
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
@@ -59,15 +60,11 @@ func testMoveLeader(t *testing.T, auto bool) {
 	target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.MemberID())
 	if auto {
 		err := clus.Members[oldLeadIdx].Server.TryTransferLeadershipOnShutdown()
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 	} else {
 		mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance
 		_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 	}
 
 	// wait until leader transitions have happened
diff --git a/tests/integration/v3_lease_test.go b/tests/integration/v3_lease_test.go
index 3c5e9040b4a5..6e5cff739f37 100644
--- a/tests/integration/v3_lease_test.go
+++ b/tests/integration/v3_lease_test.go
@@ -51,9 +51,7 @@ func TestV3LeasePromote(t *testing.T) {
 	lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3})
 	ttl := time.Duration(lresp.TTL) * time.Second
 	afterGrant := time.Now()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
@@ -203,9 +201,8 @@ func TestV3LeaseNegativeID(t *testing.T) {
 			time.Sleep(100 * time.Millisecond)
 			// restore lessor from db file
 			clus.Members[2].Stop(t)
-			if err = clus.Members[2].Restart(t); err != nil {
-				t.Fatal(err)
-			}
+			err = clus.Members[2].Restart(t)
+			require.NoError(t, err)
 
 			// revoke lease should remove key
 			integration.WaitClientV3(t, clus.Members[2].Client)
@@ -217,9 +214,7 @@ func TestV3LeaseNegativeID(t *testing.T) {
 			for _, m := range clus.Members {
 				getr := &pb.RangeRequest{Key: tc.k}
 				getresp, err := integration.ToGRPC(m.Client).KV.Range(ctx, getr)
-				if err != nil {
-					t.Fatal(err)
-				}
+				require.NoError(t, err)
 				if revision == 0 {
 					revision = getresp.Header.Revision
 				}
@@ -386,9 +381,7 @@ func TestV3LeaseCheckpoint(t *testing.T) {
 			defer cancel()
 			c := integration.ToGRPC(clus.RandClient())
 			lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: int64(tc.ttl.Seconds())})
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, err)
 
 			for i := 0; i < tc.leaderChanges; i++ {
 				// wait for a checkpoint to occur
@@ -442,9 +435,7 @@ func TestV3LeaseExists(t *testing.T) {
 	lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
 		ctx0,
 		&pb.LeaseGrantRequest{TTL: 30})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
@@ -469,9 +460,7 @@ func TestV3LeaseLeases(t *testing.T) {
 		lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
 			ctx0,
 			&pb.LeaseGrantRequest{TTL: 30})
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		if lresp.Error != "" {
 			t.Fatal(lresp.Error)
 		}
@@ -481,9 +470,7 @@ func TestV3LeaseLeases(t *testing.T) {
 	lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases(
 		context.Background(),
 		&pb.LeaseLeasesRequest{})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	for i := range lresp.Leases {
 		if lresp.Leases[i].ID != ids[i] {
 			t.Fatalf("#%d: lease ID expected %d, got %d", i, ids[i], lresp.Leases[i].ID)
@@ -530,9 +517,7 @@ func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient
 
 	if useClusterClient {
 		clusterClient, err := clus.ClusterClient(t)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		for i := 0; i < 300; i++ {
 			go func() { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }()
 		}
@@ -630,9 +615,7 @@ func TestV3GetNonExistLease(t *testing.T) {
 		t.Errorf("failed to create lease %v", err)
 	}
 	_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	leaseTTLr := &pb.LeaseTimeToLiveRequest{
 		ID: lresp.ID,
@@ -665,49 +648,33 @@ func TestV3LeaseSwitch(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
-	if err1 != nil {
-		t.Fatal(err1)
-	}
+	require.NoError(t, err1)
 	lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
-	if err2 != nil {
-		t.Fatal(err2)
-	}
+	require.NoError(t, err2)
 
 	// attach key on lease1 then switch it to lease2
 	put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID}
 	_, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID}
 	_, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// revoke lease1 should not remove key
 	_, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	rreq := &pb.RangeRequest{Key: []byte("foo")}
 	rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 1 {
 		t.Fatalf("unexpect removal of key")
 	}
 
 	// revoke lease2 should remove key
 	_, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 0 {
 		t.Fatalf("lease removed but key remains")
 	}
@@ -728,9 +695,7 @@ func TestV3LeaseFailover(t *testing.T) {
 
 	// create lease
 	lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
@@ -745,9 +710,7 @@ func TestV3LeaseFailover(t *testing.T) {
 	ctx, cancel := context.WithCancel(mctx)
 	defer cancel()
 	lac, err := lc.LeaseKeepAlive(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// send keep alive to old leader until the old leader starts
 	// to drop lease request.
@@ -792,9 +755,7 @@ func TestV3LeaseRequireLeader(t *testing.T) {
 	ctx, cancel := context.WithCancel(mctx)
 	defer cancel()
 	lac, err := lc.LeaseKeepAlive(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	donec := make(chan struct{})
 	go func() {
@@ -827,16 +788,12 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {
 
 	lsc := integration.ToGRPC(clus.Client(0)).Lease
 	lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
 	_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// restart server and ensure lease still exists
 	clus.Members[0].Stop(t)
@@ -846,22 +803,16 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {
 	// overwrite old client with newly dialed connection
 	// otherwise, error with "grpc: RPC failed fast due to transport failure"
 	nc, err := integration.NewClientV3(clus.Members[0])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	kvc = integration.ToGRPC(nc).KV
 	lsc = integration.ToGRPC(nc).Lease
 	defer nc.Close()
 
 	// revoke should delete the key
 	_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 0 {
 		t.Fatalf("lease removed but key remains")
 	}
@@ -878,22 +829,16 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
 
 	lsc := integration.ToGRPC(clus.Client(0)).Lease
 	lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
 	_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// revoke should delete the key
 	_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// restart server and ensure revoked key doesn't exist
 	clus.Members[0].Stop(t)
@@ -903,16 +848,12 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
 	// overwrite old client with newly dialed connection
 	// otherwise, error with "grpc: RPC failed fast due to transport failure"
 	nc, err := integration.NewClientV3(clus.Members[0])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	kvc = integration.ToGRPC(nc).KV
 	defer nc.Close()
 
 	rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 0 {
 		t.Fatalf("lease removed but key remains")
 	}
@@ -930,22 +871,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
 
 	lsc := integration.ToGRPC(clus.Client(0)).Lease
 	lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if lresp.Error != "" {
 		t.Fatal(lresp.Error)
 	}
 	_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// overwrite lease with none
 	_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// restart server and ensure lease still exists
 	clus.Members[0].Stop(t)
@@ -955,22 +890,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
 	// overwrite old client with newly dialed connection
 	// otherwise, error with "grpc: RPC failed fast due to transport failure"
 	nc, err := integration.NewClientV3(clus.Members[0])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	kvc = integration.ToGRPC(nc).KV
 	lsc = integration.ToGRPC(nc).Lease
 	defer nc.Close()
 
 	// revoke the detached lease
 	_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 1 {
 		t.Fatalf("only detached lease removed, key should remain")
 	}
@@ -988,18 +917,14 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
 	var leaseIDs []int64
 	for i := 0; i < 2; i++ {
 		lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		if lresp.Error != "" {
 			t.Fatal(lresp.Error)
 		}
 		leaseIDs = append(leaseIDs, lresp.ID)
 
 		_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 	}
 
 	// restart server and ensure lease still exists
@@ -1015,36 +940,26 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
 	// overwrite old client with newly dialed connection
 	// otherwise, error with "grpc: RPC failed fast due to transport failure"
 	nc, err := integration.NewClientV3(clus.Members[0])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	kvc = integration.ToGRPC(nc).KV
 	lsc = integration.ToGRPC(nc).Lease
 	defer nc.Close()
 
 	// revoke the old lease
 	_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[0]})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	// key should still exist
 	rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 1 {
 		t.Fatalf("only detached lease removed, key should remain")
 	}
 
 	// revoke the latest lease
 	_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[1]})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	rresp, err = kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 0 {
 		t.Fatalf("lease removed but key remains")
 	}
@@ -1149,20 +1064,15 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.Cluster, int64
 	defer clus.Terminate(t)
 
 	leaseID, err := acquireLeaseAndKey(clus, "foo")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if err = act(clus, leaseID); err != nil {
-		t.Fatal(err)
-	}
+	err = act(clus, leaseID)
+	require.NoError(t, err)
 
 	// confirm no key
 	rreq := &pb.RangeRequest{Key: []byte("foo")}
 	rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if len(rresp.Kvs) != 0 {
 		t.Fatalf("lease removed but key remains")
 	}
diff --git a/tests/integration/v3_stm_test.go b/tests/integration/v3_stm_test.go
index 5880009d58ba..a7c0781f0ded 100644
--- a/tests/integration/v3_stm_test.go
+++ b/tests/integration/v3_stm_test.go
@@ -21,6 +21,7 @@ import (
 	"strconv"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	v3 "go.etcd.io/etcd/client/v3"
@@ -230,15 +231,13 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
 	defer clus.Terminate(t)
 
 	etcdc := clus.RandClient()
-	if _, err := etcdc.Put(context.TODO(), "foo", "bar"); err != nil {
-		t.Fatal(err)
-	}
+	_, err := etcdc.Put(context.TODO(), "foo", "bar")
+	require.NoError(t, err)
 	donec, readyc := make(chan struct{}), make(chan struct{})
 	go func() {
 		<-readyc
-		if _, err := etcdc.Delete(context.TODO(), "foo"); err != nil {
-			t.Error(err)
-		}
+		_, derr := etcdc.Delete(context.TODO(), "foo")
+		assert.NoError(t, derr)
 		close(donec)
 	}()
@@ -256,9 +255,9 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
 	}
 
 	iso := concurrency.WithIsolation(concurrency.RepeatableReads)
-	if _, err := concurrency.NewSTM(etcdc, applyf, iso); err != nil {
-		t.Fatalf("error on stm txn (%v)", err)
-	}
+	_, err = concurrency.NewSTM(etcdc, applyf, iso)
+	require.NoErrorf(t, err, "error on stm txn")
+
 	if try != 2 {
 		t.Fatalf("STM apply expected to run twice, got %d", try)
 	}
diff --git a/tests/integration/v3_tls_test.go b/tests/integration/v3_tls_test.go
index 81601d18a5f9..ea39edd46b61 100644
--- a/tests/integration/v3_tls_test.go
+++ b/tests/integration/v3_tls_test.go
@@ -62,9 +62,7 @@ func testTLSCipherSuites(t *testing.T, valid bool) {
 	defer clus.Terminate(t)
 
 	cc, err := cliTLS.ClientConfig()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	cli, cerr := integration.NewClient(t, clientv3.Config{
 		Endpoints:   []string{clus.Members[0].GRPCURL},
 		DialTimeout: time.Second,
diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go
index b3fc8236f784..9d98cb8dcef4 100644
--- a/tests/integration/v3_watch_restore_test.go
+++ b/tests/integration/v3_watch_restore_test.go
@@ -20,6 +20,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
+
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/tests/v3/framework/config"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
@@ -65,9 +67,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 	wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx)
-	if errW != nil {
-		t.Fatal(errW)
-	}
+	require.NoError(t, errW)
 	if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
 		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5},
 	}}); err != nil {
diff --git a/tests/integration/v3_watch_test.go b/tests/integration/v3_watch_test.go
index d9bc3e70eabe..fcb2ab36edf3 100644
--- a/tests/integration/v3_watch_test.go
+++ b/tests/integration/v3_watch_test.go
@@ -594,19 +594,15 @@ func TestV3WatchEmptyKey(t *testing.T) {
 			Key: []byte("foo"),
 		},
 	}}
-	if err := ws.Send(req); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := ws.Recv(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, ws.Send(req))
+	_, err := ws.Recv()
+	require.NoError(t, err)
 
 	// put a key with empty value
 	kvc := integration.ToGRPC(clus.RandClient()).KV
 	preq := &pb.PutRequest{Key: []byte("foo")}
-	if _, err := kvc.Put(context.TODO(), preq); err != nil {
-		t.Fatal(err)
-	}
+	_, err = kvc.Put(context.TODO(), preq)
+	require.NoError(t, err)
 
 	// check received PUT
 	resp, rerr := ws.Recv()
@@ -1240,12 +1236,9 @@ func TestV3WatchWithFilter(t *testing.T) {
 			Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT},
 		},
 	}}
-	if err := ws.Send(req); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := ws.Recv(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, ws.Send(req))
+	_, err := ws.Recv()
+	require.NoError(t, err)
 
 	recv := make(chan *pb.WatchResponse, 1)
 	go func() {
@@ -1260,9 +1253,8 @@ func TestV3WatchWithFilter(t *testing.T) {
 	// put a key with empty value
 	kvc := integration.ToGRPC(clus.RandClient()).KV
 	preq := &pb.PutRequest{Key: []byte("foo")}
-	if _, err := kvc.Put(context.TODO(), preq); err != nil {
-		t.Fatal(err)
-	}
+	_, err = kvc.Put(context.TODO(), preq)
+	require.NoError(t, err)
 
 	select {
 	case <-recv:
@@ -1271,9 +1263,8 @@ func TestV3WatchWithFilter(t *testing.T) {
 	}
 
 	dreq := &pb.DeleteRangeRequest{Key: []byte("foo")}
-	if _, err := kvc.DeleteRange(context.TODO(), dreq); err != nil {
-		t.Fatal(err)
-	}
+	_, err = kvc.DeleteRange(context.TODO(), dreq)
+	require.NoError(t, err)
 
 	select {
 	case resp := <-recv:
@@ -1386,9 +1377,7 @@ func TestV3WatchCancellation(t *testing.T) {
 	time.Sleep(3 * time.Second)
 
 	minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	var expected string
 	if integration.ThroughProxy {
@@ -1425,9 +1414,7 @@ func TestV3WatchCloseCancelRace(t *testing.T) {
 	time.Sleep(3 * time.Second)
 
 	minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	var expected string
 	if integration.ThroughProxy {
diff --git a/tests/integration/v3election_grpc_test.go b/tests/integration/v3election_grpc_test.go
index d0ca72b42555..cf5c7cb3427e 100644
--- a/tests/integration/v3election_grpc_test.go
+++ b/tests/integration/v3election_grpc_test.go
@@ -35,13 +35,9 @@ func TestV3ElectionCampaign(t *testing.T) {
 	defer clus.Terminate(t)
 
 	lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
-	if err1 != nil {
-		t.Fatal(err1)
-	}
+	require.NoError(t, err1)
 	lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
-	if err2 != nil {
-		t.Fatal(err2)
-	}
+	require.NoError(t, err2)
 
 	lc := integration.ToGRPC(clus.Client(0)).Election
 	req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")}
@@ -129,13 +125,9 @@ func TestV3ElectionObserve(t *testing.T) {
 	}
 
 	lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
-	if err1 != nil {
-		t.Fatal(err1)
-	}
+	require.NoError(t, err1)
 	c1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("0")})
-	if cerr1 != nil {
-		t.Fatal(cerr1)
-	}
+	require.NoError(t, cerr1)
 
 	// overlap other leader so it waits on resign
 	leader2c := make(chan struct{})
diff --git a/tests/integration/v3lock_grpc_test.go b/tests/integration/v3lock_grpc_test.go
index f293bc1a556d..fe2e161cded5 100644
--- a/tests/integration/v3lock_grpc_test.go
+++ b/tests/integration/v3lock_grpc_test.go
@@ -19,6 +19,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
+
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
@@ -32,13 +34,9 @@ func TestV3LockLockWaiter(t *testing.T) {
 	defer clus.Terminate(t)
 
 	lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
-	if err1 != nil {
-		t.Fatal(err1)
-	}
+	require.NoError(t, err1)
 	lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
-	if err2 != nil {
-		t.Fatal(err2)
-	}
+	require.NoError(t, err2)
 
 	lc := integration.ToGRPC(clus.Client(0)).Lock
 	l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID})
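The whole patch applies one mechanical rule: an `if err != nil { t.Fatal(err) }` block on the test's main goroutine becomes `require.NoError(t, err)`, inverted checks become `require.Error`, and assertions that run inside spawned goroutines use `assert.NoError` instead (see putWorker in revision_test.go and the deletion goroutine in v3_stm_test.go), because require stops the test via t.FailNow/runtime.Goexit, which is only safe on the goroutine running the test function — the same concern behind the `//nolint:testifylint` marker. A minimal self-contained sketch of the rule; the test and its doWork helper are hypothetical, not part of the patch:

package example_test

import (
	"errors"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// doWork is a hypothetical stand-in for calls such as kvc.Put or clus.RemoveMember.
func doWork() error { return nil }

func TestRequirePattern(t *testing.T) {
	// Main test goroutine: require.NoError fails fast via t.FailNow,
	// matching the old `if err != nil { t.Fatal(err) }` idiom.
	require.NoError(t, doWork())

	// Inverted checks (`if err == nil { t.Fatal(...) }`) become require.Error.
	require.Error(t, errors.New("expected failure"))

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Spawned goroutine: use assert, not require. t.FailNow calls
		// runtime.Goexit, which is only safe on the goroutine that runs
		// the test function itself.
		assert.NoError(t, doWork())
	}()
	wg.Wait()
}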