
Commit

Fix the 'printf: non-constant format string in call to fmt.Errorf (govet)' lint errors (#666)

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>
tenzen-y authored Oct 16, 2024
1 parent 5caa9d5 commit c738a83
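
For background, govet's printf analyzer reports calls like fmt.Errorf(msg) when msg is not a constant format string, because any '%' in a message assembled at runtime is interpreted as a formatting verb. The commit therefore switches these call sites to errors.New, which takes the message verbatim, and renames the controller's k8s.io/apimachinery/pkg/api/errors import to apierrors so the standard library's errors package is usable again. A minimal, self-contained illustration of the difference (the message text is made up for the example):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// A message assembled at runtime that happens to contain a '%'.
	msg := "job progress is 100%s complete"

	// Old pattern: the message is used as a format string, so "%s" is parsed as a
	// verb with no matching argument and the output is mangled. govet flags this call.
	fmt.Println(fmt.Errorf(msg)) // job progress is 100%!s(MISSING) complete

	// New pattern (what this commit applies): the message is taken verbatim.
	fmt.Println(errors.New(msg)) // job progress is 100%s complete
}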
Showing 3 changed files with 23 additions and 23 deletions.
1 change: 0 additions & 1 deletion .github/workflows/golangci-lint.yml
@@ -20,4 +20,3 @@ jobs:
with:
version: v1.61.0
args: --timeout=5m
-only-new-issues: true
4 changes: 2 additions & 2 deletions Makefile
@@ -121,7 +121,7 @@ tidy:

.PHONY: lint
lint: bin/golangci-lint ## Run golangci-lint linter
-$(GOLANGCI_LINT) run --new-from-rev=origin/master
+$(GOLANGCI_LINT) run

# Generate deploy/v2beta1/mpi-operator.yaml
manifest: kustomize crd
@@ -146,7 +146,7 @@ bin/envtest: bin ## Download envtest-setup locally if necessary.
@GOBIN=$(PROJECT_DIR)/bin go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest

bin/kubectl: bin
-curl -L -o $(PROJECT_DIR)/bin/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/$(GOOS)/$(GOARCH)/kubectl
+curl -L -o $(PROJECT_DIR)/bin/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/$(GOOS)/$(GOARCH)/kubectl
chmod +x $(PROJECT_DIR)/bin/kubectl

.PHONY: kind
41 changes: 21 additions & 20 deletions pkg/controller/mpi_job_controller.go
@@ -22,6 +22,7 @@ import (
"crypto/rand"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"reflect"
"sort"
@@ -34,7 +35,7 @@ import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -359,7 +360,7 @@ func NewMPIJobControllerWithClock(
// Pipe to default handler first, which just logs the error
cache.DefaultWatchErrorHandler(r, err)

-if errors.IsUnauthorized(err) || errors.IsForbidden(err) {
+if apierrors.IsUnauthorized(err) || apierrors.IsForbidden(err) {
klog.Fatalf("Unable to sync cache for informer %s: %s. Requesting controller to exit.", name, err)
}
})
@@ -564,7 +565,7 @@ func (c *MPIJobController) syncHandler(key string) error {
sharedJob, err := c.mpiJobLister.MPIJobs(namespace).Get(name)
if err != nil {
// The MPIJob may no longer exist, in which case we stop processing.
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
klog.V(4).Infof("MPIJob has been deleted: %v", key)
return nil
}
@@ -714,7 +715,7 @@ func cleanUpWorkerPods(mpiJob *kubeflow.MPIJob, c *MPIJobController) error {
// getLauncherJob gets the launcher Job controlled by this MPIJob.
func (c *MPIJobController) getLauncherJob(mpiJob *kubeflow.MPIJob) (*batchv1.Job, error) {
launcher, err := c.jobLister.Jobs(mpiJob.Namespace).Get(mpiJob.Name + launcherSuffix)
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
@@ -729,7 +730,7 @@ func (c *MPIJobController) getLauncherJob(mpiJob *kubeflow.MPIJob
if !metav1.IsControlledBy(launcher, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, launcher.Name, launcher.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-return launcher, fmt.Errorf(msg)
+return launcher, errors.New(msg)
}

return launcher, nil
@@ -740,7 +741,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
newPodGroup := c.PodGroupCtrl.newPodGroup(mpiJob)
podGroup, err := c.PodGroupCtrl.getPodGroup(newPodGroup.GetNamespace(), newPodGroup.GetName())
// If the PodGroup doesn't exist, we'll create it.
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return c.PodGroupCtrl.createPodGroup(context.TODO(), newPodGroup)
}
// If an error occurs during Get/Create, we'll requeue the item so we
@@ -754,7 +755,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
if !metav1.IsControlledBy(podGroup, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, podGroup.GetName(), "PodGroup")
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-return nil, fmt.Errorf(msg)
+return nil, errors.New(msg)
}

if !c.PodGroupCtrl.pgSpecsAreEqual(podGroup, newPodGroup) {
@@ -767,7 +768,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
func (c *MPIJobController) deletePodGroups(mpiJob *kubeflow.MPIJob) error {
podGroup, err := c.PodGroupCtrl.getPodGroup(mpiJob.Namespace, mpiJob.Name)
if err != nil {
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return nil
}
return err
@@ -778,7 +779,7 @@ func (c *MPIJobController) deletePodGroups(mpiJob *kubeflow.MPIJob) error {
if !metav1.IsControlledBy(podGroup, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, podGroup.GetName(), "PodGroup")
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-return fmt.Errorf(msg)
+return errors.New(msg)
}

// If the PodGroup exist, we'll delete it.
@@ -839,7 +840,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev

cm, err := c.configMapLister.ConfigMaps(mpiJob.Namespace).Get(mpiJob.Name + configSuffix)
// If the ConfigMap doesn't exist, we'll create it.
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return c.kubeClient.CoreV1().ConfigMaps(mpiJob.Namespace).Create(context.TODO(), newCM, metav1.CreateOptions{})
}
if err != nil {
@@ -851,7 +852,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev
if !metav1.IsControlledBy(cm, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, cm.Name, cm.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-return nil, fmt.Errorf(msg)
+return nil, errors.New(msg)
}

// If the ConfigMap is changed, update it
@@ -869,7 +870,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev

func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *corev1.Service) (*corev1.Service, error) {
svc, err := c.serviceLister.Services(job.Namespace).Get(newSvc.Name)
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return c.kubeClient.CoreV1().Services(job.Namespace).Create(context.TODO(), newSvc, metav1.CreateOptions{})
}
if err != nil {
@@ -878,7 +879,7 @@ func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *core
if !metav1.IsControlledBy(svc, job) {
msg := fmt.Sprintf(MessageResourceExists, svc.Name, svc.Kind)
c.recorder.Event(job, corev1.EventTypeWarning, ErrResourceExists, msg)
-return nil, fmt.Errorf(msg)
+return nil, errors.New(msg)
}

// If the Service selector is changed, update it.
@@ -895,7 +896,7 @@ func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *core
// or create one if it doesn't exist.
func (c *MPIJobController) getOrCreateSSHAuthSecret(job *kubeflow.MPIJob) (*corev1.Secret, error) {
secret, err := c.secretLister.Secrets(job.Namespace).Get(job.Name + sshAuthSecretSuffix)
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
secret, err := newSSHAuthSecret(job)
if err != nil {
return nil, err
@@ -908,7 +909,7 @@ func (c *MPIJobController) getOrCreateSSHAuthSecret(job *kubeflow.MPIJob) (*core
if !metav1.IsControlledBy(secret, job) {
msg := fmt.Sprintf(MessageResourceExists, secret.Name, secret.Kind)
c.recorder.Event(job, corev1.EventTypeWarning, ErrResourceExists, msg)
-return nil, fmt.Errorf(msg)
+return nil, errors.New(msg)
}
newSecret, err := newSSHAuthSecret(job)
if err != nil {
@@ -973,7 +974,7 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
pod, err := c.podLister.Pods(mpiJob.Namespace).Get(workerName(mpiJob, i))

// If the worker Pod doesn't exist, we'll create it.
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
worker := c.newWorker(mpiJob, i)
pod, err = c.kubeClient.CoreV1().Pods(mpiJob.Namespace).Create(context.TODO(), worker, metav1.CreateOptions{})
}
@@ -989,7 +990,7 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
if pod != nil && !metav1.IsControlledBy(pod, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, pod.Name, pod.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-return nil, fmt.Errorf(msg)
+return nil, errors.New(msg)
}
workerPods = append(workerPods, pod)
}
@@ -1020,15 +1021,15 @@ func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
pod, err := c.podLister.Pods(mpiJob.Namespace).Get(name)

// If the worker Pod doesn't exist, no need to remove it.
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
continue
}
// If the worker is not controlled by this MPIJob resource, we should log
// a warning to the event recorder and return.
if pod != nil && !metav1.IsControlledBy(pod, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, pod.Name, pod.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-return fmt.Errorf(msg)
+return errors.New(msg)
}
// If the worker pod is not running and cleanupPolicy is
// set to CleanPodPolicyRunning, keep the pod.
@@ -1039,7 +1040,7 @@ func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
continue
}
err = c.kubeClient.CoreV1().Pods(mpiJob.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
-if err != nil && !errors.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
klog.Errorf("Failed to delete pod[%s/%s]: %v", mpiJob.Namespace, name, err)
return err
}
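
The controller diff above has two parts: the apimachinery errors import is aliased to apierrors (freeing the errors identifier for the standard library), and preformatted messages are wrapped with errors.New instead of fmt.Errorf. A rough sketch of the resulting shape, with a hypothetical helper standing in for the real controller methods and assuming the k8s.io/apimachinery module is on the module path:

package sketch

import (
	"errors" // standard library, usable again now that the import below is aliased
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// reportForeignOwner is a hypothetical helper mirroring the pattern used by the
// real controller methods: tolerate "not found" lookups and reject resources
// that are not controlled by the MPIJob.
func reportForeignOwner(name string, getErr error, controlled bool) error {
	if apierrors.IsNotFound(getErr) {
		return nil // absent resources are created elsewhere
	}
	if getErr != nil {
		return getErr
	}
	if !controlled {
		msg := fmt.Sprintf("resource %q already exists and is not managed by this MPIJob", name)
		return errors.New(msg) // fmt.Errorf(msg) here is what govet used to flag
	}
	return nil
}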
