tests/common, tests/integration: Address tests var-naming lint rule #17635

Merged: 2 commits, Mar 24, 2024
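The diff below applies Go's initialism and camelCase naming conventions to test-local identifiers: memberId, clusterId, leaseId, and newLeaderId become memberID, clusterID, leaseID, and newLeaderID; Grpc becomes GRPC; and snake_case names such as event_count and duration_to_compare become eventCount and durationToCompare. A minimal sketch of what the tests var-naming lint rule accepts and rejects, assuming a revive-style var-naming check (the exact linter wiring is an assumption, not taken from this page):

package naming

import "fmt"

// printMemberBefore would be flagged by a var-naming check: "Id" mixes
// case inside an initialism that Go style spells "ID", and event_count
// uses an underscore. (Hypothetical example, not code from this PR.)
func printMemberBefore() {
	memberId := uint64(1)
	event_count := 0
	fmt.Println(memberId, event_count)
}

// printMemberAfter follows the convention the PR applies across the
// tests: initialisms stay fully upper-case and multiword names use
// camelCase.
func printMemberAfter() {
	memberID := uint64(1)
	eventCount := 0
	fmt.Println(memberID, eventCount)
}

If the repository runs this rule through a linter such as golangci-lint, re-running it over tests/common and tests/integration after these renames should report no remaining var-naming findings; the specific linter configuration is an assumption here.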
12 changes: 6 additions & 6 deletions tests/common/auth_test.go
@@ -600,25 +600,25 @@ func TestAuthMemberRemove(t *testing.T) {
require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))

-memberId, clusterId := memberToRemove(ctx, t, rootAuthClient, clusterSize)
-delete(memberIDToEndpoints, memberId)
+memberID, clusterID := memberToRemove(ctx, t, rootAuthClient, clusterSize)
+delete(memberIDToEndpoints, memberID)
endpoints := make([]string, 0, len(memberIDToEndpoints))
for _, ep := range memberIDToEndpoints {
endpoints = append(endpoints, ep)
}
testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
// ordinary user cannot remove a member
-_, err := testUserAuthClient.MemberRemove(ctx, memberId)
+_, err := testUserAuthClient.MemberRemove(ctx, memberID)
require.ErrorContains(t, err, PermissionDenied)

// root can remove a member, building a client excluding removed member endpoint
rootAuthClient2 := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword), WithEndpoints(endpoints)))
-resp, err := rootAuthClient2.MemberRemove(ctx, memberId)
+resp, err := rootAuthClient2.MemberRemove(ctx, memberID)
require.NoError(t, err)
-require.Equal(t, resp.Header.ClusterId, clusterId)
+require.Equal(t, resp.Header.ClusterId, clusterID)
found := false
for _, member := range resp.Members {
-if member.ID == memberId {
+if member.ID == memberID {
found = true
break
}
26 changes: 13 additions & 13 deletions tests/common/member_test.go
@@ -58,7 +58,7 @@ func TestMemberList(t *testing.T) {
}
for _, m := range resp.Members {
if len(m.ClientURLs) == 0 {
t.Logf("member is not started, memberId:%d, memberName:%s", m.ID, m.Name)
t.Logf("member is not started, memberID:%d, memberName:%s", m.ID, m.Name)
return false
}
}
@@ -210,8 +210,8 @@ func TestMemberRemove(t *testing.T) {
time.Sleep(etcdserver.HealthInterval)
}

-memberId, clusterId := memberToRemove(ctx, t, cc, c.ClusterSize)
-removeResp, err := cc.MemberRemove(ctx, memberId)
+memberID, clusterID := memberToRemove(ctx, t, cc, c.ClusterSize)
+removeResp, err := cc.MemberRemove(ctx, memberID)

if c.ClusterSize == 1 && quorumTc.expectSingleNodeError {
require.ErrorContains(t, err, "etcdserver: re-configuration failed due to not enough started members")
@@ -225,15 +225,15 @@

require.NoError(t, err, "MemberRemove failed")
t.Logf("removeResp.Members:%v", removeResp.Members)
-if removeResp.Header.ClusterId != clusterId {
-t.Fatalf("MemberRemove failed, expected ClusterId: %d, got: %d", clusterId, removeResp.Header.ClusterId)
+if removeResp.Header.ClusterId != clusterID {
+t.Fatalf("MemberRemove failed, expected ClusterID: %d, got: %d", clusterID, removeResp.Header.ClusterId)
}
if len(removeResp.Members) != c.ClusterSize-1 {
t.Fatalf("MemberRemove failed, expected length of members: %d, got: %d", c.ClusterSize-1, len(removeResp.Members))
}
for _, m := range removeResp.Members {
-if m.ID == memberId {
-t.Fatalf("MemberRemove failed, member(id=%d) is still in cluster", memberId)
+if m.ID == memberID {
+t.Fatalf("MemberRemove failed, member(id=%d) is still in cluster", memberID)
}
}
})
@@ -246,15 +246,15 @@ func TestMemberRemove(t *testing.T) {
// If clusterSize == 1, return the only member.
// Otherwise, return a member that client has not connected to.
// It ensures that `MemberRemove` function does not return an "etcdserver: server stopped" error.
-func memberToRemove(ctx context.Context, t *testing.T, client intf.Client, clusterSize int) (memberId uint64, clusterId uint64) {
+func memberToRemove(ctx context.Context, t *testing.T, client intf.Client, clusterSize int) (memberID uint64, clusterID uint64) {
listResp, err := client.MemberList(ctx, false)
if err != nil {
t.Fatal(err)
}

-clusterId = listResp.Header.ClusterId
+clusterID = listResp.Header.ClusterId
if clusterSize == 1 {
-memberId = listResp.Members[0].ID
+memberID = listResp.Members[0].ID
} else {
// get status of the specific member that client has connected to
statusResp, err := client.Status(ctx)
@@ -265,15 +265,15 @@ func memberToRemove(ctx context.Context, t *testing.T, client intf.Client, clust
// choose a member that client has not connected to
for _, m := range listResp.Members {
if m.ID != statusResp[0].Header.MemberId {
-memberId = m.ID
+memberID = m.ID
break
}
}
-if memberId == 0 {
+if memberID == 0 {
t.Fatalf("memberToRemove failed. listResp:%v, statusResp:%v", listResp, statusResp)
}
}
-return memberId, clusterId
+return memberID, clusterID
}

func getMemberIDToEndpoints(ctx context.Context, t *testing.T, clus intf.Cluster) (memberIDToEndpoints map[uint64]string) {
4 changes: 2 additions & 2 deletions tests/integration/clientv3/concurrency/session_test.go
@@ -65,9 +65,9 @@ func TestSessionTTLOptions(t *testing.T) {
}
defer s.Close()

-leaseId := s.Lease()
+leaseID := s.Lease()
// TTL retrieved should be less than the set TTL, but not equal to default:60 or expired:-1
-resp, err := cli.Lease.TimeToLive(context.Background(), leaseId)
+resp, err := cli.Lease.TimeToLive(context.Background(), leaseID)
if err != nil {
t.Log(err)
}
6 changes: 3 additions & 3 deletions tests/integration/clientv3/naming/resolver_test.go
@@ -32,7 +32,7 @@ import (
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)

-func testEtcdGrpcResolver(t *testing.T, lbPolicy string) {
+func testEtcdGRPCResolver(t *testing.T, lbPolicy string) {

// Setup two new dummy stub servers
payloadBody := []byte{'1'}
@@ -137,7 +137,7 @@ func TestEtcdGrpcResolverPickFirst(t *testing.T) {
integration2.BeforeTest(t)

// Pick first is the default load balancer policy for grpc-go
testEtcdGrpcResolver(t, "pick_first")
testEtcdGRPCResolver(t, "pick_first")
}

// TestEtcdGrpcResolverRoundRobin mimics scenarios described in grpc_naming.md doc.
@@ -146,7 +146,7 @@ func TestEtcdGrpcResolverRoundRobin(t *testing.T) {
integration2.BeforeTest(t)

// Round robin is a common alternative for more production oriented scenarios
testEtcdGrpcResolver(t, "round_robin")
testEtcdGRPCResolver(t, "round_robin")
}

func TestEtcdEndpointManager(t *testing.T) {
4 changes: 2 additions & 2 deletions tests/integration/embed/embed_test.go
@@ -224,7 +224,7 @@ func TestEmbedEtcdAutoCompactionRetentionRetained(t *testing.T) {
e, err := embed.StartEtcd(cfg)
assert.NoError(t, err)
autoCompactionRetention := e.Server.Cfg.AutoCompactionRetention
-duration_to_compare, _ := time.ParseDuration("2h0m0s")
-assert.Equal(t, duration_to_compare, autoCompactionRetention)
+durationToCompare, _ := time.ParseDuration("2h0m0s")
+assert.Equal(t, durationToCompare, autoCompactionRetention)
e.Close()
}
4 changes: 2 additions & 2 deletions tests/integration/v3_leadership_test.go
@@ -195,14 +195,14 @@ func TestFirstCommitNotification(t *testing.T) {
oldLeaderClient := cluster.Client(oldLeaderIdx)

newLeaderIdx := (oldLeaderIdx + 1) % clusterSize
-newLeaderId := uint64(cluster.Members[newLeaderIdx].ID())
+newLeaderID := uint64(cluster.Members[newLeaderIdx].ID())

notifiers := make(map[int]<-chan struct{}, clusterSize)
for i, clusterMember := range cluster.Members {
notifiers[i] = clusterMember.Server.FirstCommitInTermNotify()
}

-_, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderId)
+_, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderID)

if err != nil {
t.Errorf("got error during leadership transfer: %v", err)
8 changes: 4 additions & 4 deletions tests/integration/v3_lease_test.go
@@ -389,15 +389,15 @@ func TestV3LeaseCheckpoint(t *testing.T) {
time.Sleep(tc.checkpointingInterval + 1*time.Second)

// Force a leader election
-leaderId := clus.WaitLeader(t)
-leader := clus.Members[leaderId]
+leaderID := clus.WaitLeader(t)
+leader := clus.Members[leaderID]
leader.Stop(t)
time.Sleep(time.Duration(3*integration.ElectionTicks) * framecfg.TickDuration)
leader.Restart(t)
}

-newLeaderId := clus.WaitLeader(t)
-c2 := integration.ToGRPC(clus.Client(newLeaderId))
+newLeaderID := clus.WaitLeader(t)
+c2 := integration.ToGRPC(clus.Client(newLeaderID))

time.Sleep(250 * time.Millisecond)

12 changes: 6 additions & 6 deletions tests/integration/v3_watch_test.go
@@ -243,7 +243,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
t.Fatalf("#%d: canceled watcher on create %+v", i, cresp)
}

-createdWatchId := cresp.WatchId
+createdWatchID := cresp.WatchId
if cresp.Header == nil || cresp.Header.Revision != 1 {
t.Fatalf("#%d: header revision got +%v, wanted revison 1", i, cresp)
}
@@ -278,8 +278,8 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
if wresp.Created != resp.Created {
t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
}
-if resp.WatchId != createdWatchId {
-t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
+if resp.WatchId != createdWatchID {
+t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchID)
}

if !reflect.DeepEqual(resp.Events, wresp.Events) {
@@ -1445,8 +1445,8 @@ func TestV3WatchProgressWaitsForSync(t *testing.T) {

// Verify that we get the watch responses first. Note that
// events might be spread across multiple packets.
-var event_count = 0
-for event_count < count {
+eventCount := 0
+for eventCount < count {
wr := <-wch
if wr.Err() != nil {
t.Fatal(fmt.Errorf("watch error: %w", wr.Err()))
@@ -1457,7 +1457,7 @@
if wr.Header.Revision != int64(count+1) {
t.Fatal("Incomplete watch response!")
}
-event_count += len(wr.Events)
+eventCount += len(wr.Events)
}
// client needs to request progress notification again
err = client.RequestProgress(ctx)