
Modernize Go code #2252

Draft · wants to merge 1 commit into base: main
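This PR mechanically modernizes the Go code, with no behavior changes: `interface{}` becomes its alias `any` (Go 1.18), C-style counting loops become `for i := range n` or `for range n` (Go 1.22), hand-rolled sort and membership loops become `slices.Sort` and `slices.Contains` (Go 1.21), manual clamping becomes the `min` and `max` builtins (Go 1.21), and `[]byte(fmt.Sprintf(...))` becomes `fmt.Appendf(nil, ...)` (Go 1.19). The notes interleaved with the diffs below illustrate each pattern with standalone, hypothetical sketches rather than code from this repository.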
8 changes: 4 additions & 4 deletions api/v1beta2/foundationdb_database_configuration.go
@@ -925,10 +925,10 @@ var versionFlagIndices = make(map[string]int)
var roleNames = fieldNames(RoleCounts{})

// fieldNames provides the names of fields on a structure.
-func fieldNames(value interface{}) []ProcessClass {
+func fieldNames(value any) []ProcessClass {
countType := reflect.TypeOf(value)
names := make([]ProcessClass, 0, countType.NumField())
-    for index := 0; index < countType.NumField(); index++ {
+    for index := range countType.NumField() {
tag := strings.Split(countType.Field(index).Tag.Get("json"), ",")
names = append(names, ProcessClass(tag[0]))
}
@@ -951,10 +951,10 @@ func init() {

// fieldIndices provides a map from the names of fields in a structure to the
// index of each field in the list of fields.
-func fieldIndices(value interface{}, result interface{}, keyType reflect.Type) {
+func fieldIndices(value any, result any, keyType reflect.Type) {
countType := reflect.TypeOf(value)
resultValue := reflect.ValueOf(result)
-    for index := 0; index < countType.NumField(); index++ {
+    for index := range countType.NumField() {
tag := strings.Split(countType.Field(index).Tag.Get("json"), ",")
resultValue.SetMapIndex(reflect.ValueOf(tag[0]).Convert(keyType), reflect.ValueOf(index))
}
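Both rewrites in this file recur throughout the PR: `any` has been a predeclared alias for `interface{}` since Go 1.18, so that change is purely cosmetic, and ranging over an integer (Go 1.22) iterates from 0 through n-1. A standalone sketch of the same reflection pattern, with a hypothetical `Counts` struct standing in for `RoleCounts`:

```go
package main

import (
    "fmt"
    "reflect"
    "strings"
)

// Counts is a hypothetical stand-in for RoleCounts; only the json tags matter.
type Counts struct {
    Storage int `json:"storage,omitempty"`
    Log     int `json:"log,omitempty"`
}

// fieldNames mirrors the patched helper: `any` instead of `interface{}`,
// and range-over-int instead of a C-style counting loop.
func fieldNames(value any) []string {
    countType := reflect.TypeOf(value)
    names := make([]string, 0, countType.NumField())
    for index := range countType.NumField() {
        tag := strings.Split(countType.Field(index).Tag.Get("json"), ",")
        names = append(names, tag[0])
    }
    return names
}

func main() {
    fmt.Println(fieldNames(Counts{})) // [storage log]
}
```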
8 changes: 3 additions & 5 deletions api/v1beta2/foundationdb_process_address.go
@@ -24,7 +24,7 @@ import (
"fmt"
"net"
"regexp"
"sort"
"slices"
"strconv"
"strings"
)
@@ -96,9 +96,7 @@ func (address ProcessAddress) SortedFlags() []string {
}
}

-    sort.Slice(flags, func(i int, j int) bool {
-        return flags[i] < flags[j]
-    })
+    slices.Sort(flags)

return flags
}
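`slices.Sort` (Go 1.21) replaces the `sort.Slice` closure for any ordered element type; the generic version is shorter and avoids the reflection-based swapping inside `sort.Slice`. A minimal sketch with made-up flag names:

```go
package main

import (
    "fmt"
    "slices"
)

func main() {
    // Hypothetical flag names; the real ones come from ProcessAddress.Flags.
    flags := []string{"tls", "fromHostname"}

    // Before: sort.Slice(flags, func(i, j int) bool { return flags[i] < flags[j] })
    slices.Sort(flags) // in-place ascending sort for any cmp.Ordered element

    fmt.Println(flags) // [fromHostname tls]
}
```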
@@ -149,7 +147,7 @@ func (address *ProcessAddress) UnmarshalJSON(data []byte) error {

// MarshalJSON defines the parsing method for the ProcessAddress field from struct to JSON
func (address ProcessAddress) MarshalJSON() ([]byte, error) {
-    return []byte(fmt.Sprintf("\"%s\"", address.String())), nil
+    return fmt.Appendf(nil, "\"%s\"", address.String()), nil
}

// ParseProcessAddress parses a structured address from its string
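`fmt.Appendf` (Go 1.19) formats straight into a byte slice, so the `MarshalJSON` rewrite drops the intermediate `string` that `fmt.Sprintf` allocated. A sketch of the same shape, with a hypothetical `addr` type; like the original, it assumes `String()` produces no characters that need JSON escaping:

```go
package main

import "fmt"

// addr is a hypothetical stand-in for ProcessAddress.
type addr struct{ host string }

func (a addr) String() string { return a.host }

// MarshalJSON mirrors the patched method: format the quoted address
// directly into a fresh byte slice instead of converting a Sprintf result.
func (a addr) MarshalJSON() ([]byte, error) {
    return fmt.Appendf(nil, "\"%s\"", a.String()), nil
}

func main() {
    b, _ := addr{host: "127.0.0.1:4501"}.MarshalJSON()
    fmt.Println(string(b)) // "127.0.0.1:4501"
}
```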
29 changes: 9 additions & 20 deletions api/v1beta2/foundationdbcluster_types.go
@@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"slices"
)

// +kubebuilder:object:root=true
@@ -1799,7 +1800,7 @@ func (str *ConnectionString) String() string {
// GenerateNewGenerationID builds a new generation ID
func (str *ConnectionString) GenerateNewGenerationID() error {
id := strings.Builder{}
-    for i := 0; i < 32; i++ {
+    for range 32 {
err := id.WriteByte(alphanum[rand.Intn(len(alphanum))])
if err != nil {
return err
@@ -2022,19 +2023,11 @@ func (cluster *FoundationDBCluster) ProcessGroupIsBeingRemoved(processGroupID Pr
}
}

-    for _, id := range cluster.Spec.ProcessGroupsToRemove {
-        if id == processGroupID {
-            return true
-        }
-    }
-
-    for _, id := range cluster.Spec.ProcessGroupsToRemoveWithoutExclusion {
-        if id == processGroupID {
-            return true
-        }
+    if slices.Contains(cluster.Spec.ProcessGroupsToRemove, processGroupID) {
+        return true
    }

-    return false
+    return slices.Contains(cluster.Spec.ProcessGroupsToRemoveWithoutExclusion, processGroupID)
}

// ShouldUseLocks determine whether we should use locks to coordinator global
@@ -2262,20 +2255,16 @@ const (
// AddServersPerDisk adds serverPerDisk to the status field to keep track which ConfigMaps should be kept
func (clusterStatus *FoundationDBClusterStatus) AddServersPerDisk(serversPerDisk int, pClass ProcessClass) {
if pClass == ProcessClassStorage {
-        for _, curServersPerDisk := range clusterStatus.StorageServersPerDisk {
-            if curServersPerDisk == serversPerDisk {
-                return
-            }
+        if slices.Contains(clusterStatus.StorageServersPerDisk, serversPerDisk) {
+            return
        }
clusterStatus.StorageServersPerDisk = append(clusterStatus.StorageServersPerDisk, serversPerDisk)
return
}

if pClass.SupportsMultipleLogServers() {
-        for _, curServersPerDisk := range clusterStatus.LogServersPerDisk {
-            if curServersPerDisk == serversPerDisk {
-                return
-            }
+        if slices.Contains(clusterStatus.LogServersPerDisk, serversPerDisk) {
+            return
        }
clusterStatus.LogServersPerDisk = append(clusterStatus.LogServersPerDisk, serversPerDisk)
}
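`slices.Contains` (Go 1.21) collapses each scan-and-compare loop into one expression, and its `comparable` constraint covers both the `ProcessGroupID` slices in `ProcessGroupIsBeingRemoved` and the `int` slices in `AddServersPerDisk`. A sketch of the append-if-absent shape used above:

```go
package main

import (
    "fmt"
    "slices"
)

// addUnique mirrors the patched pattern: append only when the value is
// not already present, using slices.Contains instead of a manual scan.
func addUnique(existing []int, value int) []int {
    if slices.Contains(existing, value) {
        return existing
    }
    return append(existing, value)
}

func main() {
    serversPerDisk := []int{1}
    serversPerDisk = addUnique(serversPerDisk, 2) // appended
    serversPerDisk = addUnique(serversPerDisk, 2) // already present, no-op
    fmt.Println(serversPerDisk)                   // [1 2]
}
```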
7 changes: 2 additions & 5 deletions controllers/add_process_groups.go
@@ -56,10 +56,7 @@ func (a addProcessGroups) reconcile(ctx context.Context, r *FoundationDBClusterR

hasNewProcessGroups := false
for _, processClass := range fdbv1beta2.ProcessClasses {
-        desiredCount := desiredCounts[processClass]
-        if desiredCount < 0 {
-            desiredCount = 0
-        }
+        desiredCount := max(desiredCounts[processClass], 0)
newCount := desiredCount - processCounts[processClass]
if newCount <= 0 {
continue
@@ -73,7 +70,7 @@
hasNewProcessGroups = true
logger.Info("Adding new Process Groups", "processClass", processClass, "newCount", newCount, "desiredCount", desiredCount, "currentCount", processCounts[processClass])
r.Recorder.Event(cluster, corev1.EventTypeNormal, "AddingProcesses", fmt.Sprintf("Adding %d %s processes", newCount, processClass))
-        for i := 0; i < newCount; i++ {
+        for range newCount {
processGroupID := cluster.GetNextRandomProcessGroupIDWithExclusions(processClass, processGroupIDs[processClass], exclusions)
logger.Info("Adding new Process Group to cluster", "processClass", processClass, "processGroupID", processGroupID, "exclusions", exclusions)
cluster.Status.ProcessGroups = append(cluster.Status.ProcessGroups, fdbv1beta2.NewProcessGroupStatus(processGroupID, processClass, nil))
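The `max` builtin (Go 1.21) replaces the three-line clamp, and `max(desiredCounts[processClass], 0)` reads directly as "never below zero". A sketch with hypothetical counts:

```go
package main

import "fmt"

func main() {
    // Hypothetical desired counts; a negative value here stands for "disabled".
    desiredCounts := map[string]int{"storage": 3, "stateless": -1}

    for processClass, count := range desiredCounts {
        // Before: copy the count, compare against 0, conditionally overwrite.
        desiredCount := max(count, 0)
        fmt.Println(processClass, desiredCount)
    }
}
```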
2 changes: 1 addition & 1 deletion controllers/add_process_groups_test.go
@@ -234,7 +234,7 @@ var _ = Describe("add_process_groups", func() {
excludedCnt := 100
exclusions := make([]fdbv1beta2.ProcessAddress, 0, excludedCnt)
excludedProcessGroupIDs = map[fdbv1beta2.ProcessGroupID]fdbv1beta2.None{}
-    for i := 0; i < excludedCnt; i++ {
+    for i := range excludedCnt {
processGroupID := fdbv1beta2.ProcessGroupID(fmt.Sprintf("storage-%d", i))
if _, ok := currentProcessGroupIDs[processGroupID]; ok {
continue
5 changes: 1 addition & 4 deletions controllers/bounce_processes.go
@@ -99,10 +99,7 @@ func (c bounceProcesses) reconcile(_ context.Context, r *FoundationDBClusterReco
if err != nil {
r.Recorder.Event(cluster, corev1.EventTypeNormal, "NeedsBounce", err.Error())
// Retry after we waited the minimum uptime or at least 15 seconds.
-        delayTime := cluster.GetMinimumUptimeSecondsForBounce() - int(currentMinimumUptime)
-        if delayTime < 15 {
-            delayTime = 15
-        }
+        delayTime := max(cluster.GetMinimumUptimeSecondsForBounce()-int(currentMinimumUptime), 15)

return &requeue{
message: err.Error(),
2 changes: 1 addition & 1 deletion controllers/cluster_controller_test.go
@@ -694,7 +694,7 @@ var _ = Describe("cluster_controller", func() {

sortPodsByName(pods)

-    for i := 0; i < 17; i++ {
+    for i := range 17 {
Expect(pods.Items[i].Name).To(Equal(originalPods.Items[i].Name))
}
})
2 changes: 1 addition & 1 deletion controllers/controllers.go
@@ -103,7 +103,7 @@ type requeue struct {
}

// processRequeue interprets a requeue result from a subreconciler.
-func processRequeue(requeue *requeue, subReconciler interface{}, object runtime.Object, recorder record.EventRecorder, logger logr.Logger) (ctrl.Result, error) {
+func processRequeue(requeue *requeue, subReconciler any, object runtime.Object, recorder record.EventRecorder, logger logr.Logger) (ctrl.Result, error) {
curLog := logger.WithValues("reconciler", fmt.Sprintf("%T", subReconciler), "requeueAfter", requeue.delay)
if requeue.message == "" && requeue.curError != nil {
requeue.message = requeue.curError.Error()
2 changes: 1 addition & 1 deletion controllers/replace_failed_process_groups_test.go
@@ -129,7 +129,7 @@ var _ = Describe("replace_failed_process_groups", func() {

BeforeEach(func() {
targetProcessGroups = make([]fdbv1beta2.ProcessGroupID, 2)
-    for i := 0; i < 2; i++ {
+    for i := range 2 {
targetProcessGroup := cluster.Status.ProcessGroups[i]
timestamp := time.Now().Add(-10 * time.Minute).Unix()
targetProcessGroup.UpdateCondition(fdbv1beta2.NodeTaintDetected, true)
5 changes: 1 addition & 4 deletions controllers/update_backup_status.go
@@ -56,10 +56,7 @@ func (s updateBackupStatus) reconcile(ctx context.Context, r *FoundationDBBackup
}

if currentBackupDeployment != nil && desiredBackupDeployment != nil {
-        status.AgentCount = int(currentBackupDeployment.Status.ReadyReplicas)
-        if status.AgentCount > int(currentBackupDeployment.Status.UpdatedReplicas) {
-            status.AgentCount = int(currentBackupDeployment.Status.UpdatedReplicas)
-        }
+        status.AgentCount = min(int(currentBackupDeployment.Status.ReadyReplicas), int(currentBackupDeployment.Status.UpdatedReplicas))
generationsMatch := currentBackupDeployment.Status.ObservedGeneration == currentBackupDeployment.ObjectMeta.Generation

annotationChange := mergeAnnotations(&currentBackupDeployment.ObjectMeta, desiredBackupDeployment.ObjectMeta)
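Symmetrically, the `min` builtin states the backup-agent rule in one line: report the smaller of the ready and updated replica counts, so agents from an old ReplicaSet are not over-counted during a rolling update. A sketch with hypothetical replica numbers:

```go
package main

import "fmt"

func main() {
    // Hypothetical Deployment status during a rolling update.
    var readyReplicas, updatedReplicas int32 = 5, 3

    // Before: assign ReadyReplicas, then overwrite if UpdatedReplicas is smaller.
    agentCount := min(int(readyReplicas), int(updatedReplicas))

    fmt.Println(agentCount) // 3
}
```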
9 changes: 2 additions & 7 deletions controllers/update_status.go
@@ -43,6 +43,7 @@ import (
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"slices"
)

// updateStatus provides a reconciliation step for updating the status in the
@@ -561,13 +562,7 @@ func validateProcessGroups(ctx context.Context, r *FoundationDBClusterReconciler
status.AddServersPerDisk(processCount, processGroup.ProcessClass)

imageType := internal.GetImageType(pod)
-        imageTypeFound := false
-        for _, currentImageType := range status.ImageTypes {
-            if imageType == currentImageType {
-                imageTypeFound = true
-                break
-            }
-        }
+        imageTypeFound := slices.Contains(status.ImageTypes, imageType)
if !imageTypeFound {
status.ImageTypes = append(status.ImageTypes, imageType)
}
2 changes: 1 addition & 1 deletion e2e/fixtures/chaos_network.go
@@ -78,7 +78,7 @@ func (factory *Factory) InjectNetworkLoss(lossPercentage string, source chaosmes
// InjectNetworkLossBetweenPods Injects network loss b/w each combination of podGroups.
func (factory *Factory) InjectNetworkLossBetweenPods(pods []chaosmesh.PodSelectorSpec, loss string) {
count := len(pods)
-    for i := 0; i < count; i++ {
+    for i := range count {
for j := i + 1; j < count; j++ {
factory.InjectNetworkLoss(loss, pods[i], pods[j], chaosmesh.Both)
}
4 changes: 2 additions & 2 deletions e2e/fixtures/fdb_backup.go
@@ -216,14 +216,14 @@ func (fdbBackup *FdbBackup) WaitForRestorableVersion(version uint64) {
false,
)
g.Expect(err).NotTo(gomega.HaveOccurred())
-    var result map[string]interface{}
+    var result map[string]any
g.Expect(json.Unmarshal([]byte(out), &result)).NotTo(gomega.HaveOccurred())

restorable, ok := result["Restorable"].(bool)
g.Expect(ok).To(gomega.BeTrue())
g.Expect(restorable).To(gomega.BeTrue())

-    restorablePoint, ok := result["LatestRestorablePoint"].(map[string]interface{})
+    restorablePoint, ok := result["LatestRestorablePoint"].(map[string]any)
g.Expect(ok).To(gomega.BeTrue())

restorableVersion, ok := restorablePoint["Version"].(float64)
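Only the spelling of the map type changes here, but the decoding pattern is worth spelling out: unmarshalling into `map[string]any` still requires a type assertion at each level, and JSON numbers arrive as `float64`. A sketch with a hypothetical status document of the same shape:

```go
package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Hypothetical backup status output of the same shape.
    out := `{"Restorable": true, "LatestRestorablePoint": {"Version": 42}}`

    var result map[string]any
    if err := json.Unmarshal([]byte(out), &result); err != nil {
        panic(err)
    }

    restorable, ok := result["Restorable"].(bool)
    fmt.Println(restorable, ok) // true true

    // Nested objects decode as map[string]any; JSON numbers as float64.
    point, _ := result["LatestRestorablePoint"].(map[string]any)
    version, _ := point["Version"].(float64)
    fmt.Println(uint64(version)) // 42
}
```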
2 changes: 1 addition & 1 deletion e2e/fixtures/fdb_cluster.go
@@ -1825,7 +1825,7 @@ func (fdbCluster *FdbCluster) GenerateRandomValues(
res := make([]KeyValue, 0, n)
index := []byte{'a'}
var err error
-    for i := 0; i < n; i++ {
+    for range n {
res = append(res, KeyValue{
Key: append([]byte{prefix}, index...),
Value: []byte(fdbCluster.factory.RandStringRunes(4)),
12 changes: 6 additions & 6 deletions e2e/fixtures/fdb_cluster_creation_tracker.go
@@ -35,9 +35,9 @@ import (
// CreationTrackerLogger is an interface that can be used to log the time between different creation steps.
type CreationTrackerLogger interface {
// NewEntry adds an entry to the internal map.
-    NewEntry() map[string]interface{}
+    NewEntry() map[string]any
// Log will write the values in the map directly to the logger.
-    Log(values map[string]interface{}) error
+    Log(values map[string]any) error
// Flush will write all values from the entry map to the logger.
Flush() error
}
@@ -51,12 +51,12 @@ func NewDefaultCreationTrackerLogger() CreationTrackerLogger {
type DefaultCreationTrackerLogger struct{}

// NewEntry adds an entry to the internal map.
-func (logger *DefaultCreationTrackerLogger) NewEntry() map[string]interface{} {
-    return map[string]interface{}{}
+func (logger *DefaultCreationTrackerLogger) NewEntry() map[string]any {
+    return map[string]any{}
}

// Log will write the values in the map directly to the logger.
-func (logger *DefaultCreationTrackerLogger) Log(_ map[string]interface{}) error {
+func (logger *DefaultCreationTrackerLogger) Log(_ map[string]any) error {
return nil
}

@@ -132,7 +132,7 @@ func (tracker *fdbClusterCreationTracker) nextStep() {
log.Println("Finished step", tracker.currentStep.String(), "in", duration)

// Log the duration in milliseconds
-    gomega.Expect(tracker.logger.Log(map[string]interface{}{
+    gomega.Expect(tracker.logger.Log(map[string]any{
tracker.currentStep.String(): duration.Milliseconds(),
})).NotTo(gomega.HaveOccurred())
}
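Because `any` is an alias rather than a distinct type, the `CreationTrackerLogger` interface and its implementations did not need to be updated in lockstep: a method declared with `interface{}` still satisfies an interface declared with `any`, since the method sets are identical. A minimal demonstration:

```go
package main

import "fmt"

// Logger declares its method with `any`...
type Logger interface {
    Log(values map[string]any) error
}

type impl struct{}

// ...while the implementation still spells it `interface{}`. This compiles
// because `any` is declared as `type any = interface{}` (an alias, not a
// new type), so both signatures are identical.
func (impl) Log(values map[string]interface{}) error {
    fmt.Println(values)
    return nil
}

func main() {
    var l Logger = impl{}
    _ = l.Log(map[string]any{"step": 1})
}
```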
2 changes: 1 addition & 1 deletion e2e/fixtures/fixtures.go
@@ -43,7 +43,7 @@ func (shutdown *ShutdownHooks) Defer(f func() error) {
}

// ToJSON tries to convert any object to a string representing the struct as JSON.
-func ToJSON(v interface{}) string {
+func ToJSON(v any) string {
s, err := json.Marshal(v)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return string(s)
4 changes: 2 additions & 2 deletions e2e/fixtures/pods.go
@@ -60,7 +60,7 @@ func (factory *Factory) RandomPickPod(input []corev1.Pod, count int) []corev1.Po
maxPods = len(input)
}

-    for i := 0; i < maxPods; i++ {
+    for i := range maxPods {
ret[i] = input[perm[i]]
}

@@ -84,7 +84,7 @@ func (factory *Factory) RandomPickCluster(input []*FdbCluster, count int) []*Fdb
maxPods = len(input)
}

-    for i := 0; i < maxPods; i++ {
+    for i := range maxPods {
ret[i] = input[perm[i]]
}

4 changes: 2 additions & 2 deletions e2e/test_operator_stress/operator_stress_test.go
@@ -53,7 +53,7 @@ var _ = Describe("Operator Stress", Label("e2e"), func() {
It("should create a healthy and available cluster", func() {
// Since Ginkgo doesn't support what we want, we run this multiple times.
// We create and delete a cluster 10 times to ensure we don't have any flaky behaviour in the operator.
-    for i := 0; i < 10; i++ {
+    for range 10 {
fdbCluster := factory.CreateFdbCluster(
fixtures.DefaultClusterConfig(false),
factory.GetClusterOptions()...,
@@ -81,7 +81,7 @@

It("should replace the targeted Pod", func() {
// Since Ginkgo doesn't support what we want, we run this multiple times.
-    for i := 0; i < 10; i++ {
+    for range 10 {
Expect(fdbCluster.ClearProcessGroupsToRemove()).ShouldNot(HaveOccurred())
pod := factory.ChooseRandomPod(fdbCluster.GetPods())
fdbCluster.ReplacePod(*pod, true)
8 changes: 4 additions & 4 deletions fdbclient/admin_client.go
@@ -703,7 +703,7 @@ func (client *cliAdminClient) SetKnobs(knobs []string) {

// WithValues will update the logger used by the current AdminClient to contain the provided key value pairs. The provided
// arguments must be even.
-func (client *cliAdminClient) WithValues(keysAndValues ...interface{}) {
+func (client *cliAdminClient) WithValues(keysAndValues ...any) {
newLogger := client.log.WithValues(keysAndValues...)
client.log = newLogger

@@ -745,7 +745,7 @@ func (client *cliAdminClient) GetProcessesUnderMaintenance() (map[fdbv1beta2.Pro

maintenancePrefix := client.Cluster.GetMaintenancePrefix() + "/"

-    maintenanceProcesses, err := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
+    maintenanceProcesses, err := db.Transact(func(tr fdb.Transaction) (any, error) {
err := tr.Options().SetReadSystemKeys()
if err != nil {
return nil, err
@@ -799,7 +799,7 @@ func (client *cliAdminClient) RemoveProcessesUnderMaintenance(processGroupIDs []
return err
}

-    _, err = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
+    _, err = db.Transact(func(tr fdb.Transaction) (any, error) {
err := tr.Options().SetAccessSystemKeys()
if err != nil {
return nil, err
@@ -831,7 +831,7 @@ func (client *cliAdminClient) SetProcessesUnderMaintenance(processGroupIDs []fdb
return err
}

-    _, err = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
+    _, err = db.Transact(func(tr fdb.Transaction) (any, error) {
err := tr.Options().SetAccessSystemKeys()
if err != nil {
return nil, err
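The closures above keep the `func(tr fdb.Transaction) (any, error)` shape because the FoundationDB Go binding declares `Database.Transact` with an `interface{}` result, and `any` names the identical type. A hedged sketch of the same transaction shape; it assumes the `github.com/apple/foundationdb/bindings/go` bindings, API version 710, and a reachable cluster:

```go
package main

import (
    "fmt"

    "github.com/apple/foundationdb/bindings/go/src/fdb"
)

func main() {
    fdb.MustAPIVersion(710)
    db := fdb.MustOpenDefault() // assumes a default cluster file for a running cluster

    // Transact retries the closure on retryable errors. Its declared result
    // type is interface{}, which `any` names identically.
    val, err := db.Transact(func(tr fdb.Transaction) (any, error) {
        if err := tr.Options().SetAccessSystemKeys(); err != nil {
            return nil, err
        }
        // \xff/coordinators is a system key holding the connection string.
        return tr.Get(fdb.Key("\xff/coordinators")).Get()
    })
    if err != nil {
        panic(err)
    }
    fmt.Printf("connection string: %s\n", val.([]byte))
}
```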
2 changes: 1 addition & 1 deletion fdbclient/fdb_client.go
@@ -52,7 +52,7 @@ func (fdbClient *realFdbLibClient) getValueFromDBUsingKey(fdbKey string, timeout
return nil, err
}

-    result, err := database.Transact(func(transaction fdb.Transaction) (interface{}, error) {
+    result, err := database.Transact(func(transaction fdb.Transaction) (any, error) {
err := transaction.Options().SetAccessSystemKeys()
if err != nil {
return nil, err