diff --git a/accelerators/nvidia.go b/accelerators/nvidia.go index ac528db0e1..054d206b2a 100644 --- a/accelerators/nvidia.go +++ b/accelerators/nvidia.go @@ -47,7 +47,7 @@ const nvidiaVendorId = "0x10de" // Setup initializes NVML if nvidia devices are present on the node. func (nm *NvidiaManager) Setup() { if !detectDevices(nvidiaVendorId) { - glog.Info("No NVIDIA devices found.") + glog.V(4).Info("No NVIDIA devices found.") return } @@ -56,7 +56,7 @@ func (nm *NvidiaManager) Setup() { return } go func() { - glog.Info("Starting goroutine to initialize NVML") + glog.V(2).Info("Starting goroutine to initialize NVML") // TODO: use globalHousekeepingInterval for range time.Tick(time.Minute) { nm.initializeNVML() @@ -71,7 +71,7 @@ func (nm *NvidiaManager) Setup() { func detectDevices(vendorId string) bool { devices, err := ioutil.ReadDir(sysFsPCIDevicesPath) if err != nil { - glog.Warningf("error reading %q: %v", sysFsPCIDevicesPath, err) + glog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err) return false } @@ -79,11 +79,11 @@ func detectDevices(vendorId string) bool { vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor") content, err := ioutil.ReadFile(vendorPath) if err != nil { - glog.Infof("Error while reading %q: %v", vendorPath, err) + glog.V(4).Infof("Error while reading %q: %v", vendorPath, err) continue } if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) { - glog.Infof("Found device with vendorId %q", vendorId) + glog.V(3).Infof("Found device with vendorId %q", vendorId) return true } } @@ -95,7 +95,7 @@ func (nm *NvidiaManager) initializeNVML() { if err := gonvml.Initialize(); err != nil { // This is under a logging level because otherwise we may cause // log spam if the drivers/nvml is not installed on the system. 
- glog.V(3).Infof("Could not initialize NVML: %v", err) + glog.V(4).Infof("Could not initialize NVML: %v", err) return } numDevices, err := gonvml.DeviceCount() @@ -107,7 +107,7 @@ func (nm *NvidiaManager) initializeNVML() { nm.Unlock() return } - glog.Infof("NVML initialized. Number of nvidia devices: %v", numDevices) + glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices) nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices) for i := 0; i < int(numDevices); i++ { device, err := gonvml.DeviceHandleByIndex(uint(i)) diff --git a/cadvisor.go b/cadvisor.go index df8146a438..56fa7f747b 100644 --- a/cadvisor.go +++ b/cadvisor.go @@ -156,7 +156,7 @@ func main() { // Install signal handler. installSignalHandler(containerManager) - glog.Infof("Starting cAdvisor version: %s-%s on port %d", version.Info["version"], version.Info["revision"], *argPort) + glog.V(1).Infof("Starting cAdvisor version: %s-%s on port %d", version.Info["version"], version.Info["revision"], *argPort) addr := fmt.Sprintf("%s:%d", *argIp, *argPort) glog.Fatal(http.ListenAndServe(addr, mux)) diff --git a/container/containerd/factory.go b/container/containerd/factory.go index a021538b16..dba43ef32e 100644 --- a/container/containerd/factory.go +++ b/container/containerd/factory.go @@ -133,7 +133,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c return fmt.Errorf("failed to get cgroup subsystems: %v", err) } - glog.Infof("Registering containerd factory") + glog.V(1).Infof("Registering containerd factory") f := &containerdFactory{ cgroupSubsystems: cgroupSubsystems, client: client, diff --git a/container/crio/factory.go b/container/crio/factory.go index b0151c7f12..0c77db69ed 100644 --- a/container/crio/factory.go +++ b/container/crio/factory.go @@ -154,7 +154,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c return fmt.Errorf("failed to get cgroup subsystems: %v", err) } - glog.Infof("Registering CRI-O 
factory") + glog.V(1).Infof("Registering CRI-O factory") f := &crioFactory{ client: client, cgroupSubsystems: cgroupSubsystems, diff --git a/container/crio/handler.go b/container/crio/handler.go index 391d383322..024341da8a 100644 --- a/container/crio/handler.go +++ b/container/crio/handler.go @@ -185,7 +185,7 @@ func newCrioContainerHandler( } // TODO for env vars we wanted to show from container.Config.Env from whitelist //for _, exposedEnv := range metadataEnvs { - //glog.Infof("TODO env whitelist: %v", exposedEnv) + //glog.V(4).Infof("TODO env whitelist: %v", exposedEnv) //} return handler, nil diff --git a/container/docker/factory.go b/container/docker/factory.go index a5ce4d1424..903ce38afe 100644 --- a/container/docker/factory.go +++ b/container/docker/factory.go @@ -352,7 +352,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c } } - glog.Infof("Registering Docker factory") + glog.V(1).Infof("Registering Docker factory") f := &dockerFactory{ cgroupSubsystems: cgroupSubsystems, client: client, diff --git a/container/raw/factory.go b/container/raw/factory.go index 36d236c8dd..1b8a43a407 100644 --- a/container/raw/factory.go +++ b/container/raw/factory.go @@ -83,7 +83,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return err } - glog.Infof("Registering Raw factory") + glog.V(1).Infof("Registering Raw factory") factory := &rawFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, diff --git a/container/rkt/factory.go b/container/rkt/factory.go index f29c615eba..3f79d753e0 100644 --- a/container/rkt/factory.go +++ b/container/rkt/factory.go @@ -86,7 +86,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return fmt.Errorf("failed to find supported cgroup mounts for the raw factory") } - glog.Infof("Registering Rkt factory") + glog.V(1).Infof("Registering Rkt factory") factory := &rktFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, 
diff --git a/container/systemd/factory.go b/container/systemd/factory.go index 4e71d40bda..cb3b7c89cd 100644 --- a/container/systemd/factory.go +++ b/container/systemd/factory.go @@ -51,7 +51,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string { // Register registers the systemd container factory. func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { - glog.Infof("Registering systemd factory") + glog.V(1).Infof("Registering systemd factory") factory := &systemdFactory{} container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw}) return nil diff --git a/fs/fs.go b/fs/fs.go index b2eb7bd6c3..271b01e356 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -136,8 +136,8 @@ func NewFsInfo(context Context) (FsInfo, error) { fsInfo.addDockerImagesLabel(context, mounts) fsInfo.addCrioImagesLabel(context, mounts) - glog.Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) - glog.Infof("Filesystem partitions: %+v", fsInfo.partitions) + glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) + glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions) fsInfo.addSystemRootLabel(mounts) return fsInfo, nil } @@ -162,7 +162,7 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) { path := filepath.Join(dir, file.Name()) target, err := os.Readlink(path) if err != nil { - glog.Infof("Failed to resolve symlink for %q", path) + glog.Warningf("Failed to resolve symlink for %q", path) continue } device, err := filepath.Abs(filepath.Join(dir, target)) @@ -438,7 +438,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) { file, err := os.Open(diskStatsFile) if err != nil { if os.IsNotExist(err) { - glog.Infof("not collecting filesystem statistics because file %q was not available", diskStatsFile) + glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile) return diskStatsMap, nil } return nil, err 
@@ -561,12 +561,12 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec du - %v", err) } timer := time.AfterFunc(timeout, func() { - glog.Infof("killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) + glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) cmd.Process.Kill() }) stdoutb, souterr := ioutil.ReadAll(stdoutp) if souterr != nil { - glog.Errorf("failed to read from stdout for cmd %v - %v", cmd.Args, souterr) + glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr) } stderrb, _ := ioutil.ReadAll(stderrp) err = cmd.Wait() @@ -600,7 +600,7 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String()) } timer := time.AfterFunc(timeout, func() { - glog.Infof("killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) + glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) findCmd.Process.Kill() }) err := findCmd.Wait() @@ -741,7 +741,7 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.Infof("btrfs mount %#v", mount) + glog.V(4).Infof("btrfs mount %#v", mount) if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK { err := syscall.Stat(mount.Mountpoint, buf) if err != nil { @@ -749,8 +749,8 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) - glog.Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) + glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) + glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) return int(major(buf.Dev)), int(minor(buf.Dev)), nil } else { diff --git a/http/handlers.go b/http/handlers.go index 
a2b4055dde..8950072b4b 100644 --- a/http/handlers.go +++ b/http/handlers.go @@ -60,7 +60,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut // Setup the authenticator object if httpAuthFile != "" { - glog.Infof("Using auth file %s", httpAuthFile) + glog.V(1).Infof("Using auth file %s", httpAuthFile) secrets := auth.HtpasswdFileProvider(httpAuthFile) authenticator := auth.NewBasicAuthenticator(httpAuthRealm, secrets) mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler)) @@ -70,7 +70,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut authenticated = true } if httpAuthFile == "" && httpDigestFile != "" { - glog.Infof("Using digest file %s", httpDigestFile) + glog.V(1).Infof("Using digest file %s", httpDigestFile) secrets := auth.HtdigestFileProvider(httpDigestFile) authenticator := auth.NewDigestAuthenticator(httpDigestRealm, secrets) mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler)) diff --git a/machine/info.go b/machine/info.go index 282f71dee4..be90f17e96 100644 --- a/machine/info.go +++ b/machine/info.go @@ -49,7 +49,7 @@ func getInfoFromFiles(filePaths string) string { return strings.TrimSpace(string(id)) } } - glog.Infof("Couldn't collect info from any of the files in %q", filePaths) + glog.Warningf("Couldn't collect info from any of the files in %q", filePaths) return "" } diff --git a/manager/container.go b/manager/container.go index 1c3194bbf1..8193bb5e37 100644 --- a/manager/container.go +++ b/manager/container.go @@ -377,8 +377,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h // Create cpu load reader. loadReader, err := cpuload.New() if err != nil { - // TODO(rjnagal): Promote to warning once we support cpu load inside namespaces. 
- glog.Infof("Could not initialize cpu load reader for %q: %s", ref.Name, err) + glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err) } else { cont.loadReader = loadReader } @@ -467,7 +466,7 @@ func (c *containerData) housekeeping() { stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples) if err != nil { if c.allowErrorLogging() { - glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) + glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) } } else if len(stats) < numSamples { // Ignore, not enough stats yet. @@ -483,6 +482,7 @@ func (c *containerData) housekeeping() { instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds()) usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds()) usageInHuman := units.HumanSize(float64(usageMemory)) + // Don't set verbosity since this is already protected by the logUsage flag. glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman) } } @@ -504,7 +504,7 @@ func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepin err := c.updateStats() if err != nil { if c.allowErrorLogging() { - glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err) + glog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err) } } // Log if housekeeping took too long. 
diff --git a/manager/manager.go b/manager/manager.go index fff62b369a..be55ad2720 100644 --- a/manager/manager.go +++ b/manager/manager.go @@ -148,7 +148,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn if err != nil { return nil, err } - glog.Infof("cAdvisor running in container: %q", selfContainer) + glog.V(2).Infof("cAdvisor running in container: %q", selfContainer) var ( dockerStatus info.DockerStatus @@ -222,13 +222,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn return nil, err } newManager.machineInfo = *machineInfo - glog.Infof("Machine: %+v", newManager.machineInfo) + glog.V(1).Infof("Machine: %+v", newManager.machineInfo) versionInfo, err := getVersionInfo() if err != nil { return nil, err } - glog.Infof("Version: %+v", *versionInfo) + glog.V(1).Infof("Version: %+v", *versionInfo) newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy()) return newManager, nil @@ -326,12 +326,12 @@ func (self *manager) Start() error { if err != nil { return err } - glog.Infof("Starting recovery of all containers") + glog.V(2).Infof("Starting recovery of all containers") err = self.detectSubcontainers("/") if err != nil { return err } - glog.Infof("Recovery completed") + glog.V(2).Infof("Recovery completed") // Watch for new container. 
quitWatcher := make(chan error) @@ -849,29 +849,25 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c if err != nil { return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err) } - glog.V(3).Infof("Got config from %q: %q", v, configFile) + glog.V(4).Infof("Got config from %q: %q", v, configFile) if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") { newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient) if err != nil { - glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) } err = cont.collectorManager.RegisterCollector(newCollector) if err != nil { - glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) } } else { newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient) if err != nil { - glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) } err = cont.collectorManager.RegisterCollector(newCollector) if err != nil { - glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) } } } @@ -946,11 +942,11 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche } devicesCgroupPath, err := handler.GetCgroupPath("devices") if err != nil { - 
glog.Infof("Error getting devices cgroup path: %v", err) + glog.Warningf("Error getting devices cgroup path: %v", err) } else { cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath) if err != nil { - glog.Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err) + glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err) } } @@ -959,7 +955,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche collectorConfigs := collector.GetCollectorConfigs(labels) err = m.registerCollectors(collectorConfigs, cont) if err != nil { - glog.Infof("failed to register collectors for %q: %v", containerName, err) + glog.Warningf("Failed to register collectors for %q: %v", containerName, err) } // Add the container name and all its aliases. The aliases must be within the namespace of the factory. @@ -1179,7 +1175,7 @@ func (self *manager) watchForNewContainers(quit chan error) error { } func (self *manager) watchForNewOoms() error { - glog.Infof("Started watching for new ooms in manager") + glog.V(2).Infof("Started watching for new ooms in manager") outStream := make(chan *oomparser.OomInstance, 10) oomLog, err := oomparser.New() if err != nil { diff --git a/manager/watcher/rkt/rkt.go b/manager/watcher/rkt/rkt.go index d54c628886..4c54d9b94e 100644 --- a/manager/watcher/rkt/rkt.go +++ b/manager/watcher/rkt/rkt.go @@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error { } func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) { - glog.Infof("starting detectRktContainers thread") + glog.V(1).Infof("Starting detectRktContainers thread") ticker := time.Tick(10 * time.Second) curpods := make(map[string]*rktapi.Pod) @@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan for id, pod := range curpods { if _, ok := newpods[id]; !ok { for _, cgroup := range podToCgroup(pod) { - 
glog.Infof("cgroup to delete = %v", cgroup) + glog.V(2).Infof("cgroup to delete = %v", cgroup) self.sendDestroyEvent(cgroup, events) } } diff --git a/storagedriver.go b/storagedriver.go index a72bc144fc..1787abce75 100644 --- a/storagedriver.go +++ b/storagedriver.go @@ -45,8 +45,8 @@ func NewMemoryStorage() (*memory.InMemoryCache, error) { return nil, err } if *storageDriver != "" { - glog.Infof("Using backend storage type %q", *storageDriver) + glog.V(1).Infof("Using backend storage type %q", *storageDriver) } - glog.Infof("Caching stats in memory for %v", *storageDuration) + glog.V(1).Infof("Caching stats in memory for %v", *storageDuration) return memory.New(*storageDuration, backendStorage), nil } diff --git a/utils/cpuload/cpuload.go b/utils/cpuload/cpuload.go index e536d90be1..f3d29b8dd0 100644 --- a/utils/cpuload/cpuload.go +++ b/utils/cpuload/cpuload.go @@ -41,6 +41,6 @@ func New() (CpuLoadReader, error) { if err != nil { return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err) } - glog.V(3).Info("Using a netlink-based load reader") + glog.V(4).Info("Using a netlink-based load reader") return reader, nil } diff --git a/utils/oomparser/oomparser.go b/utils/oomparser/oomparser.go index 184cdd73fd..a73243f2e3 100644 --- a/utils/oomparser/oomparser.go +++ b/utils/oomparser/oomparser.go @@ -143,7 +143,7 @@ func (glogAdapter) Infof(format string, args ...interface{}) { glog.V(4).Infof(format, args) } func (glogAdapter) Warningf(format string, args ...interface{}) { - glog.Infof(format, args) + glog.V(2).Infof(format, args...) } func (glogAdapter) Errorf(format string, args ...interface{}) { glog.Warningf(format, args)