
Commit

Refine log messages
Longhorn 6984

Signed-off-by: Derek Su <derek.su@suse.com>
derekbit committed Nov 13, 2023
1 parent ba8973b commit a94d1cd
Showing 10 changed files with 51 additions and 47 deletions.
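Every hunk below applies the same two-part pattern: free-standing `logrus` package calls become calls on the controller's own structured logger, and the error value moves out of the format string into `WithError(err)`. A minimal sketch of the before/after, under the assumption that each controller carries a `logrus.FieldLogger` built with `WithField`; the `widgetController` name and messages are illustrative, not taken from the Longhorn source:

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

// widgetController stands in for the Longhorn controllers touched in this
// commit; each of them holds a pre-scoped *logrus.Entry as its logger field.
type widgetController struct {
	logger logrus.FieldLogger
}

func (c *widgetController) reconcile(key string) {
	err := errors.New("object was modified")

	// Before: package-level logger, error embedded in the format string.
	logrus.Debugf("Requeue %v due to conflict: %v", key, err)

	// After: controller-scoped logger, error attached as a structured field,
	// so the message text stays stable and the error stays machine-readable.
	c.logger.WithError(err).Debugf("Requeue %v due to conflict", key)
}

func main() {
	logrus.SetLevel(logrus.DebugLevel) // make the Debug lines visible
	c := &widgetController{logger: logrus.WithField("controller", "widget")}
	c.reconcile("longhorn-system/example")
}
```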
4 changes: 2 additions & 2 deletions controller/backing_image_controller.go
@@ -108,8 +108,8 @@ func (bic *BackingImageController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer bic.queue.ShutDown()

logrus.Info("Starting Longhorn Backing Image controller")
defer logrus.Info("Shut down Longhorn Backing Image controller")
bic.logger.Info("Starting Longhorn Backing Image controller")
defer bic.logger.Info("Shut down Longhorn Backing Image controller")

if !cache.WaitForNamedCacheSync("longhorn backing images", stopCh, bic.cacheSyncs...) {
return
4 changes: 2 additions & 2 deletions controller/backing_image_data_source_controller.go
@@ -152,8 +152,8 @@ func (c *BackingImageDataSourceController) Run(workers int, stopCh <-chan struct
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()

logrus.Info("Starting Longhorn backing image data source controller")
defer logrus.Info("Shut down Longhorn backing image data source controller")
c.logger.Info("Starting Longhorn backing image data source controller")
defer c.logger.Info("Shut down Longhorn backing image data source controller")

if !cache.WaitForNamedCacheSync("longhorn backing image data source", stopCh, c.cacheSyncs...) {
return
8 changes: 4 additions & 4 deletions controller/backing_image_manager_controller.go
@@ -166,8 +166,8 @@ func (c *BackingImageManagerController) Run(workers int, stopCh <-chan struct{})
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()

logrus.Info("Starting Longhorn backing image manager controller")
defer logrus.Info("Shut down Longhorn backing image manager controller")
c.logger.Info("Starting Longhorn backing image manager controller")
defer c.logger.Info("Shut down Longhorn backing image manager controller")

if !cache.WaitForNamedCacheSync("longhorn backing image manager", stopCh, c.cacheSyncs...) {
return
@@ -278,7 +278,7 @@ func (c *BackingImageManagerController) syncBackingImageManager(key string) (err
_, err = c.ds.UpdateBackingImageManagerStatus(bim)
}
if apierrors.IsConflict(errors.Cause(err)) {
logrus.WithError(err).Debugf("Requeue %v due to conflict", key)
log.WithError(err).Debugf("Requeue %v due to conflict", key)
c.enqueueBackingImageManager(bim)
err = nil
}
@@ -460,7 +460,7 @@ func (c *BackingImageManagerController) syncBackingImageManagerPod(bim *longhorn
storageIP := c.ds.GetStorageIPFromPod(pod)
if bim.Status.StorageIP != storageIP {
bim.Status.StorageIP = storageIP
logrus.Warnf("Inconsistent storage IP from pod %v, update backing image status storage IP %v", pod.Name, bim.Status.StorageIP)
log.Warnf("Inconsistent storage IP from pod %v, update backing image status storage IP %v", pod.Name, bim.Status.StorageIP)
}

bim.Status.IP = pod.Status.PodIP
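The `syncBackingImageManager` hunk above also shows the requeue-on-conflict convention shared by these controllers: an update that fails with a Kubernetes conflict error is logged at Debug level through the scoped logger, the object is put back on the workqueue, and the error is cleared so the sync is not counted as a failure. A hedged sketch of that flow; `syncWidget` and the callbacks are placeholders, not Longhorn APIs:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// syncWidget mimics sync handlers such as syncBackingImageManager: run the
// update, and on a conflict requeue the key and swallow the error.
func syncWidget(key string, update func() error, enqueue func(string)) (err error) {
	err = update()
	if apierrors.IsConflict(errors.Cause(err)) {
		// A conflict just means another writer updated the object first; the
		// real controllers log this with log.WithError(err).Debugf(...) before
		// re-enqueueing and returning nil.
		fmt.Printf("Requeue %v due to conflict: %v\n", key, err)
		enqueue(key)
		err = nil
	}
	return err
}

func main() {
	conflict := apierrors.NewConflict(
		schema.GroupResource{Group: "longhorn.io", Resource: "backingimagemanagers"},
		"example", fmt.Errorf("the object has been modified"))

	err := syncWidget("longhorn-system/example",
		func() error { return errors.Wrap(conflict, "failed to update status") },
		func(key string) { fmt.Println("enqueued", key) })
	fmt.Println("returned error:", err)
}
```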
6 changes: 3 additions & 3 deletions controller/instance_manager_controller.go
@@ -210,8 +210,8 @@ func (imc *InstanceManagerController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer imc.queue.ShutDown()

logrus.Info("Starting Longhorn instance manager controller")
defer logrus.Info("Shut down Longhorn instance manager controller")
imc.logger.Info("Starting Longhorn instance manager controller")
defer imc.logger.Info("Shut down Longhorn instance manager controller")

if !cache.WaitForNamedCacheSync("longhorn instance manager", stopCh, imc.cacheSyncs...) {
return
@@ -996,7 +996,7 @@ func (imc *InstanceManagerController) cleanupInstanceManager(imName string) erro
return err
}
if pod != nil && pod.DeletionTimestamp == nil {
logrus.Infof("Deleting instance manager pod %v for instance manager %v", pod.Name, imName)
imc.logger.Infof("Deleting instance manager pod %v for instance manager %v", pod.Name, imName)
if err := imc.ds.DeletePod(pod.Name); err != nil {
return err
}
16 changes: 8 additions & 8 deletions controller/kubernetes_node_controller.go
@@ -111,8 +111,8 @@ func (knc *KubernetesNodeController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer knc.queue.ShutDown()

logrus.Info("Starting Longhorn Kubernetes node controller")
defer logrus.Info("Shut down Longhorn Kubernetes node controller")
knc.logger.Info("Starting Longhorn Kubernetes node controller")
defer knc.logger.Info("Shut down Longhorn Kubernetes node controller")

if !cache.WaitForNamedCacheSync("longhorn kubernetes node", stopCh, knc.cacheSyncs...) {
return
@@ -176,11 +176,11 @@ func (knc *KubernetesNodeController) syncKubernetesNode(key string) (err error)
if !datastore.ErrorIsNotFound(err) {
return err
}
logrus.Warnf("Kubernetes node %v has been deleted", key)
knc.logger.Warnf("Kubernetes node %v has been deleted", key)
}

if kubeNode == nil {
logrus.Infof("Cleaning up Longhorn node %v since failed to find the related kubernetes node", name)
knc.logger.Infof("Cleaning up Longhorn node %v since failed to find the related kubernetes node", name)
if err := knc.ds.DeleteNode(name); err != nil {
return err
}
@@ -204,7 +204,7 @@ func (knc *KubernetesNodeController) syncKubernetesNode(key string) (err error)
}
// requeue if it's conflict
if apierrors.IsConflict(errors.Cause(err)) {
logrus.Debugf("Requeue %v due to conflict: %v", key, err)
knc.logger.WithError(err).Debugf("Requeue %v due to conflict", key)
knc.enqueueLonghornNode(node)
err = nil
}
@@ -309,11 +309,11 @@ func (knc *KubernetesNodeController) syncDefaultDisks(node *longhorn.Node) (err
}
disks, err = types.CreateDisksFromAnnotation(annotation)
if err != nil {
logrus.Warnf("Failed to create disk from annotation, invalid annotation %v: %v: %v", types.KubeNodeDefaultDiskConfigAnnotationKey, val, err)
knc.logger.WithError(err).Warnf("Failed to create disk from annotation, invalid annotation %v: %v", types.KubeNodeDefaultDiskConfigAnnotationKey, val)
return nil
}
default:
logrus.Warnf("Got invalid label value for %v: %v", types.NodeCreateDefaultDiskLabelKey, val)
knc.logger.Warnf("Got invalid label value for %v: %v", types.NodeCreateDefaultDiskLabelKey, val)
return nil
}

@@ -339,7 +339,7 @@ func (knc *KubernetesNodeController) syncDefaultNodeTags(node *longhorn.Node) er
if val, exist := kubeNode.Annotations[types.KubeNodeDefaultNodeTagConfigAnnotationKey]; exist {
tags, err := types.GetNodeTagsFromAnnotation(val)
if err != nil {
logrus.Warnf("Failed to set default node tags for node %v: %v", node.Name, err)
knc.logger.WithError(err).Warnf("Failed to set default node tags for node %v", node.Name)
return nil
}
node.Spec.Tags = tags
6 changes: 3 additions & 3 deletions controller/kubernetes_pv_controller.go
@@ -101,8 +101,8 @@ func (kc *KubernetesPVController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer kc.queue.ShutDown()

logrus.Info("Starting Kubernetes PV controller")
defer logrus.Info("Shut down kubernetes PV controller")
kc.logger.Info("Starting Kubernetes PV controller")
defer kc.logger.Info("Shut down kubernetes PV controller")

if !cache.WaitForNamedCacheSync("kubernetes", stopCh, kc.cacheSyncs...) {
return
@@ -202,7 +202,7 @@ func (kc *KubernetesPVController) syncKubernetesStatus(key string) (err error) {
}
// requeue if it's conflict
if apierrors.IsConflict(errors.Cause(err)) {
logrus.Debugf("Requeue for volume %v due to conflict: %v", volumeName, err)
kc.logger.WithError(err).Debugf("Requeue for volume %v due to conflict", volumeName)
kc.enqueueVolumeChange(volume)
err = nil
}
8 changes: 4 additions & 4 deletions controller/monitor/disk_monitor.go
@@ -183,7 +183,7 @@ func (m *NodeMonitor) collectDiskData(node *longhorn.Node) map[string]*Collected
if err != nil {
// If failed to create disk service client, just log a warning and continue.
// The error out will hinder the following logics in syncNode.
logrus.WithError(err).Warnf("Failed to create disk service client")
m.logger.WithError(err).Warnf("Failed to create disk service client")
}
defer func() {
for _, diskServiceClient := range diskServiceClients {
@@ -248,13 +248,13 @@ func (m *NodeMonitor) collectDiskData(node *longhorn.Node) map[string]*Collected

replicaInstanceNames, err := m.getReplicaInstanceNamesHandler(disk.Type, node, diskName, diskConfig.DiskUUID, disk.Path, diskServiceClient)
if err != nil {
logrus.WithError(err).Warnf("Failed to get replica instance names for disk %v(%v) on node %v", diskName, disk.Path, node.Name)
m.logger.WithError(err).Warnf("Failed to get replica instance names for disk %v(%v) on node %v", diskName, disk.Path, node.Name)
continue
}

orphanedReplicaDirectoryNames, err = m.getOrphanedReplicaInstanceNames(disk.Type, node, diskName, diskConfig.DiskUUID, disk.Path, replicaInstanceNames)
if err != nil {
logrus.WithError(err).Warnf("Failed to get orphaned replica instance names for disk %v(%v) on node %v", diskName, disk.Path, node.Name)
m.logger.WithError(err).Warnf("Failed to get orphaned replica instance names for disk %v(%v) on node %v", diskName, disk.Path, node.Name)
continue
}

@@ -359,7 +359,7 @@ func (m *NodeMonitor) getOrphanedReplicaDirectoryNames(node *longhorn.Node, disk
// Find out the orphaned directories by checking with replica CRs
replicas, err := m.ds.ListReplicasByDiskUUID(diskUUID)
if err != nil {
logrus.Errorf("unable to list replicas for disk UUID %v since %v", diskUUID, err.Error())
m.logger.Errorf("unable to list replicas for disk UUID %v since %v", diskUUID, err.Error())
return map[string]string{}, nil
}

4 changes: 2 additions & 2 deletions controller/monitor/snapshot_monitor.go
@@ -226,14 +226,14 @@ func (m *SnapshotMonitor) handleErr(err error, key interface{}) {
}

if m.snapshotCheckTaskQueue.NumRequeues(key) < snapshotHashMaxRetries {
logrus.Warnf("Error syncing snapshot check task %v: %v", key, err)
m.logger.WithError(err).Warnf("Error syncing snapshot check task %v", key)
m.snapshotCheckTaskQueue.AddRateLimited(key)
return
}

utilruntime.HandleError(err)

logrus.Warnf("Dropping hashing request of snapshot %v: %v", key, err)
m.logger.WithError(err).Warnf("Dropping hashing request of snapshot %v", key)
m.snapshotCheckTaskQueue.Forget(key)
}

36 changes: 20 additions & 16 deletions controller/node_controller.go
@@ -209,12 +209,12 @@ func (nc *NodeController) isResponsibleForSnapshot(obj interface{}) bool {
}
volumeName, ok := snapshot.Labels[types.LonghornLabelVolume]
if !ok {
logrus.Warnf("Failed to find volume name from snapshot %v", snapshot.Name)
nc.logger.Warnf("Failed to find volume name from snapshot %v", snapshot.Name)
return false
}
volume, err := nc.ds.GetVolumeRO(volumeName)
if err != nil {
logrus.WithError(err).Warnf("Failed to get volume for snapshot %v", snapshot.Name)
nc.logger.WithError(err).Warnf("Failed to get volume for snapshot %v", snapshot.Name)
return false
}
if volume.Status.OwnerID != nc.controllerID {
@@ -227,7 +227,7 @@ func (nc *NodeController) snapshotHashRequired(volume *longhorn.Volume) bool {
func (nc *NodeController) snapshotHashRequired(volume *longhorn.Volume) bool {
dataIntegrityImmediateChecking, err := nc.ds.GetSettingAsBool(types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation)
if err != nil {
logrus.WithError(err).Warnf("Failed to get %v setting", types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation)
nc.logger.WithError(err).Warnf("Failed to get %v setting", types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation)
return false
}
if !dataIntegrityImmediateChecking {
@@ -241,7 +241,7 @@ func (nc *NodeController) snapshotHashRequired(volume *longhorn.Volume) bool {
if volume.Spec.SnapshotDataIntegrity == longhorn.SnapshotDataIntegrityIgnored {
dataIntegrity, err := nc.ds.GetSettingValueExisted(types.SettingNameSnapshotDataIntegrity)
if err != nil {
logrus.Warnf("Failed to get %v setting since %v", types.SettingNameSnapshotDataIntegrity, err)
nc.logger.WithError(err).Warnf("Failed to get %v setting", types.SettingNameSnapshotDataIntegrity)
return false
}

@@ -280,8 +280,8 @@ func (nc *NodeController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer nc.queue.ShutDown()

logrus.Info("Starting Longhorn node controller")
defer logrus.Info("Shut down Longhorn node controller")
nc.logger.Info("Starting Longhorn node controller")
defer nc.logger.Info("Shut down Longhorn node controller")

if !cache.WaitForNamedCacheSync("longhorn node", stopCh, nc.cacheSyncs...) {
return
@@ -351,12 +351,12 @@ func (nc *NodeController) syncNode(key string) (err error) {
node, err := nc.ds.GetNode(name)
if err != nil {
if datastore.ErrorIsNotFound(err) {
logrus.Errorf("Longhorn node %v has been deleted", key)
nc.logger.Errorf("Longhorn node %v has been deleted", key)
return nil
}
return err
}

+ log := getLoggerForNode(nc.logger, node)

if node.DeletionTimestamp != nil {
nc.eventRecorder.Eventf(node, corev1.EventTypeWarning, constant.EventReasonDelete, "Deleting node %v", node.Name)
return nc.ds.RemoveFinalizerForNode(node)
@@ -370,7 +372,7 @@ }
}
// requeue if it's conflict
if apierrors.IsConflict(errors.Cause(err)) {
logrus.Debugf("Requeue %v due to conflict: %v", key, err)
log.WithError(err).Debugf("Requeue %v due to conflict", key)
nc.enqueueNode(node)
err = nil
}
@@ -504,7 +506,7 @@ func (nc *NodeController) syncNode(key string) (err error) {
collectedDiskInfo, err := nc.syncWithDiskMonitor(node)
if err != nil {
if strings.Contains(err.Error(), "mismatching disks") {
- logrus.Info(err.Error())
+ log.WithError(err).Info("Failed to sync with disk monitor due to mismatching disks")
return nil
}
return err
@@ -525,7 +527,7 @@ func (nc *NodeController) syncNode(key string) (err error) {
data, _ := nc.snapshotMonitor.GetCollectedData()
status, ok := data.(monitor.SnapshotMonitorStatus)
if !ok {
logrus.Errorf("Failed to assert value from snapshot monitor: %v", data)
log.Errorf("Failed to assert value from snapshot monitor: %v", data)
} else {
node.Status.SnapshotCheckStatus.LastPeriodicCheckedAt = status.LastSnapshotPeriodicCheckedAt
}
@@ -620,13 +622,13 @@ func (nc *NodeController) enqueueSnapshot(old, cur interface{}) {

volumeName, ok := currentSnapshot.Labels[types.LonghornLabelVolume]
if !ok {
logrus.Warnf("Failed to get volume name from snapshot %v", currentSnapshot.Name)
nc.logger.Warnf("Failed to get volume name from snapshot %v", currentSnapshot.Name)
return
}

volume, err := nc.ds.GetVolumeRO(volumeName)
if err != nil {
logrus.WithError(err).Warnf("Failed to get volume %v", currentSnapshot.Name)
nc.logger.WithError(err).Warnf("Failed to get volume %v", currentSnapshot.Name)
return
}

@@ -644,7 +646,7 @@ func (nc *NodeController) enqueueSnapshot(old, cur interface{}) {
SnapshotName: currentSnapshot.Name,
})
} else {
logrus.Warnf("Dropped the snapshot change event with volume %v snapshot %v since snapshotChangeEventQueue is full",
nc.logger.Warnf("Dropped the snapshot change event with volume %v snapshot %v since snapshotChangeEventQueue is full",
volume.Name, currentSnapshot.Name)
}
}
@@ -1087,9 +1089,11 @@ func (nc *NodeController) createInstanceManager(node *longhorn.Node, imName, imI
}

func (nc *NodeController) cleanUpBackingImagesInDisks(node *longhorn.Node) error {
+ log := getLoggerForNode(nc.logger, node)

settingValue, err := nc.ds.GetSettingAsInt(types.SettingNameBackingImageCleanupWaitInterval)
if err != nil {
logrus.WithError(err).Warnf("Failed to get setting %v, won't do cleanup for backing images", types.SettingNameBackingImageCleanupWaitInterval)
log.WithError(err).Warnf("Failed to get setting %v, won't do cleanup for backing images", types.SettingNameBackingImageCleanupWaitInterval)
return nil
}
waitInterval := time.Duration(settingValue) * time.Minute
@@ -1162,7 +1166,7 @@ func BackingImageDiskFileCleanup(node *longhorn.Node, bi *longhorn.BackingImage,
}
lastRefAt, err := util.ParseTime(lastRefAtStr)
if err != nil {
logrus.Warnf("Failed to parse LastRefAt timestamp %v for backing image %v", lastRefAtStr, bi.Name)
logrus.WithError(err).Warnf("Failed to parse LastRefAt timestamp %v for backing image %v", lastRefAtStr, bi.Name)
continue
}
if !time.Now().After(lastRefAt.Add(waitInterval)) {
@@ -1245,7 +1249,7 @@ func (nc *NodeController) getNewAndMissingOrphanedReplicaDirectoryNames(diskName
// Find out the new/missing orphaned directories by checking with orphan CRs
orphanList, err := nc.ds.ListOrphansByNodeRO(nc.controllerID)
if err != nil {
logrus.Warnf("Failed to list orphans for node %v since %v", nc.controllerID, err.Error())
nc.logger.WithError(err).Warnf("Failed to list orphans for node %v", nc.controllerID)
return map[string]string{}, map[string]string{}
}
orphanMap := make(map[string]*longhorn.Orphan, len(orphanList))
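Beyond the one-for-one logger swaps, `syncNode` and `cleanUpBackingImagesInDisks` now derive a per-node logger first (`log := getLoggerForNode(nc.logger, node)`) so later messages carry the node context without repeating it in every format string. A minimal sketch of that helper pattern, assuming it only attaches the node name as a structured field; the field key below is a guess, not copied from the Longhorn source:

```go
package main

import "github.com/sirupsen/logrus"

// node is a tiny stand-in for *longhorn.Node.
type node struct{ Name string }

// getLoggerForNode is assumed to return the controller logger enriched with
// the node's name, mirroring the helper used in node_controller.go.
func getLoggerForNode(logger logrus.FieldLogger, n *node) logrus.FieldLogger {
	return logger.WithField("node", n.Name) // field key is an assumption
}

func main() {
	base := logrus.WithField("controller", "longhorn-node")
	log := getLoggerForNode(base, &node{Name: "worker-1"})

	// Every later message now carries controller=longhorn-node and node=worker-1.
	log.Errorf("Longhorn node %v has been deleted", "worker-1")
}
```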
6 changes: 3 additions & 3 deletions controller/recurring_job_controller.go
@@ -99,8 +99,8 @@ func (c *RecurringJobController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()

logrus.Info("Starting Longhorn Recurring Job controller")
defer logrus.Info("Shut down Longhorn Recurring Job controller")
c.logger.Info("Starting Longhorn Recurring Job controller")
defer c.logger.Info("Shut down Longhorn Recurring Job controller")

if !cache.WaitForNamedCacheSync("longhorn recurring jobs", stopCh, c.cacheSyncs...) {
return
@@ -294,7 +294,7 @@ func (c *RecurringJobController) cleanupRecurringJobLabelInVolumesAndPVCs(recurr

func (c *RecurringJobController) removeRecurringJobLabelInVolume(volume *longhorn.Volume, labelKey string) *longhorn.Volume {
if _, exist := volume.Labels[labelKey]; exist {
logrus.Infof("Removing volume %v recurring job label %v", volume.Name, labelKey)
c.logger.Infof("Removing volume %v recurring job label %v", volume.Name, labelKey)
delete(volume.Labels, labelKey)
}
return volume
