Merge pull request google#2069 from sashankreddya/process_fd_metrics
Emit number of processes and file descriptors of a container
dashpole authored Nov 1, 2018
2 parents 32b5c68 + 02ecf72 commit 8faf190
Showing 8 changed files with 102 additions and 19 deletions.
5 changes: 4 additions & 1 deletion cadvisor.go
@@ -68,6 +68,7 @@ var (
container.NetworkTcpUsageMetrics: struct{}{},
container.NetworkUdpUsageMetrics: struct{}{},
container.ProcessSchedulerMetrics: struct{}{},
container.ProcessMetrics: struct{}{},
}}

// List of metrics that can be ignored.
@@ -78,6 +79,7 @@ var (
container.NetworkUdpUsageMetrics: struct{}{},
container.PerCpuUsageMetrics: struct{}{},
container.ProcessSchedulerMetrics: struct{}{},
container.ProcessMetrics: struct{}{},
}
)

@@ -109,7 +111,7 @@ func (ml *metricSetValue) Set(value string) error {
}

func init() {
flag.Var(&ignoreMetrics, "disable_metrics", "comma-separated list of `metrics` to be disabled. Options are 'disk', 'network', 'tcp', 'udp', 'percpu'. Note: tcp and udp are disabled by default due to high CPU usage.")
flag.Var(&ignoreMetrics, "disable_metrics", "comma-separated list of `metrics` to be disabled. Options are 'disk', 'network', 'tcp', 'udp', 'percpu', 'sched', 'process'. Note: tcp and udp are disabled by default due to high CPU usage.")

// Default logging verbosity to V(2)
flag.Set("v", "2")
@@ -251,6 +253,7 @@ func toIncludedMetrics(ignoreMetrics container.MetricSet) container.MetricSet {
container.NetworkUdpUsageMetrics,
container.AcceleratorUsageMetrics,
container.AppMetrics,
container.ProcessMetrics,
}
for _, metric := range allMetrics {
if !ignoreMetrics.Has(metric) {
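
For context on the new "process" option to --disable_metrics, here is a minimal standalone sketch (not part of the commit; the package and main function are illustrative) of how a disabled metric kind is checked, assuming only the container.MetricSet map type and its Has method already used in this diff:

package main

import (
	"fmt"

	"github.com/google/cadvisor/container"
)

func main() {
	// Equivalent of passing --disable_metrics=...,process on the command line.
	ignored := container.MetricSet{
		container.ProcessMetrics: struct{}{},
	}
	// Collectors skip a metric kind when it is present in the ignore set,
	// mirroring the !ignoreMetrics.Has(metric) check in toIncludedMetrics above.
	fmt.Println(ignored.Has(container.ProcessMetrics)) // true: process/fd metrics are not exported
}
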
1 change: 1 addition & 0 deletions container/factory.go
@@ -53,6 +53,7 @@ const (
NetworkUdpUsageMetrics MetricKind = "udp"
AcceleratorUsageMetrics MetricKind = "accelerator"
AppMetrics MetricKind = "app"
ProcessMetrics MetricKind = "process"
)

func (mk MetricKind) String() string {
2 changes: 2 additions & 0 deletions docs/storage/prometheus.md
@@ -29,6 +29,7 @@ Metric name | Type | Description | Unit (where applicable)
`container_cpu_system_seconds_total` | Counter | Cumulative system cpu time consumed | seconds
`container_cpu_usage_seconds_total` | Counter | Cumulative cpu time consumed | seconds
`container_cpu_user_seconds_total` | Counter | Cumulative user cpu time consumed | seconds
`container_file_descriptors` | Gauge | Number of open file descriptors for the container |
`container_fs_inodes_free` | Gauge | Number of available Inodes |
`container_fs_inodes_total` | Gauge | Total number of Inodes |
`container_fs_io_current` | Gauge | Number of I/Os currently in progress |
@@ -66,6 +67,7 @@ Metric name | Type | Description | Unit (where applicable)
`container_network_transmit_errors_total` | Counter | Cumulative count of errors encountered while transmitting |
`container_network_tcp_usage_total` | Gauge | tcp connection usage statistic for container |
`container_network_udp_usage_total` | Gauge | udp connection usage statistic for container |
`container_processes` | Gauge | Number of processes running inside the container |
`container_spec_cpu_period` | Gauge | CPU period of the container |
`container_spec_cpu_quota` | Gauge | CPU quota of the container |
`container_spec_cpu_shares` | Gauge | CPU share of the container |
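
With process metrics enabled, two new per-container series show up in the Prometheus scrape output alongside the existing gauges. An abbreviated example (label set shortened; names and values match the test fixture in metrics/testdata/prometheus_metrics below):

container_file_descriptors{id="testcontainer",image="test",name="testcontaineralias",...} 5
container_processes{id="testcontainer",image="test",name="testcontaineralias",...} 1
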
1 change: 1 addition & 0 deletions info/v2/container.go
@@ -254,6 +254,7 @@ type ProcessInfo struct {
RunningTime string `json:"running_time"`
CgroupPath string `json:"cgroup_path"`
Cmd string `json:"cmd"`
FdCount int `json:"fd_count"`
}

type TcpStat struct {
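
The new field is also visible to v2 API consumers; a minimal sketch (not part of the commit; field values are illustrative) showing that FdCount serializes under the fd_count key:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/cadvisor/info/v2"
)

func main() {
	p := v2.ProcessInfo{Cmd: "cmd1", CgroupPath: "/docker/abc123", FdCount: 5}
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // JSON output includes ...,"cmd":"cmd1","fd_count":5
}
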
22 changes: 19 additions & 3 deletions manager/container.go
@@ -47,7 +47,9 @@ import (
var enableLoadReader = flag.Bool("enable_load_reader", false, "Whether to enable cpu load reader")
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")

var cgroupPathRegExp = regexp.MustCompile(`devices[^:]*:(.*?)[,;$]`)
// cgroup type chosen to fetch the cgroup path of a process.
// Memory has been chosen, as it is one of the default cgroups that is enabled for most containers.
var cgroupPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)

type containerInfo struct {
info.ContainerReference
@@ -185,8 +187,8 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) {
}
matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
if len(matches) != 2 {
glog.V(3).Infof("failed to get devices cgroup path from %q", cgroups)
// return root in case of failures - devices hierarchy might not be enabled.
glog.V(3).Infof("failed to get memory cgroup path from %q", cgroups)
// return root in case of failures - memory hierarchy might not be enabled.
return "/", nil
}
return string(matches[1]), nil
@@ -266,6 +268,10 @@ func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error)
func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
// report all processes for root.
isRoot := c.info.Name == "/"
rootfs := "/"
if !inHostNamespace {
rootfs = "/rootfs"
}
format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup"
out, err := c.getPsOutput(inHostNamespace, format)
if err != nil {
@@ -324,6 +330,15 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
cgroupPath = cgroup
}

var fdCount int
dirPath := path.Join(rootfs, "/proc", strconv.Itoa(pid), "fd")
fds, err := ioutil.ReadDir(dirPath)
if err != nil {
glog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
continue
}
fdCount = len(fds)

if isRoot || c.info.Name == cgroup {
processes = append(processes, v2.ProcessInfo{
User: fields[0],
@@ -338,6 +353,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
RunningTime: fields[9],
Cmd: fields[10],
CgroupPath: cgroupPath,
FdCount: fdCount,
})
}
}
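
Taken together, the changes above resolve a process's cgroup path from the memory hierarchy and count its open file descriptors by listing /proc/<pid>/fd. A standalone sketch (not part of the commit; the sample cgroup string and pid are illustrative) of both steps:

package main

import (
	"fmt"
	"io/ioutil"
	"path"
	"regexp"
	"strconv"
)

// Same expression as cgroupPathRegExp above: capture the path of the
// memory hierarchy entry from a comma-separated cgroup listing.
var cgroupPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)

func main() {
	// ps reports the cgroup field as a comma-separated list of hierarchies.
	cgroups := "11:devices:/docker/abc123,10:memory:/docker/abc123,9:cpuacct:/docker/abc123"
	if m := cgroupPathRegExp.FindSubmatch([]byte(cgroups)); len(m) == 2 {
		fmt.Println(string(m[1])) // /docker/abc123
	}

	// Count open file descriptors by listing /proc/<pid>/fd, as
	// GetProcessList now does for every process it reports.
	pid := 1
	fds, err := ioutil.ReadDir(path.Join("/proc", strconv.Itoa(pid), "fd"))
	if err != nil {
		fmt.Printf("could not list fds for pid %d: %v\n", pid, err)
		return
	}
	fmt.Println(len(fds)) // reported as ProcessInfo.FdCount
}
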
55 changes: 40 additions & 15 deletions metrics/prometheus.go
@@ -21,6 +21,7 @@ import (

"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2"

"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
@@ -35,6 +36,8 @@ type infoProvider interface {
GetVersionInfo() (*info.VersionInfo, error)
// GetMachineInfo provides information about the machine.
GetMachineInfo() (*info.MachineInfo, error)
// GetProcessList provides information about each container's processes
GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error)
}

// metricValue describes a single metric value for a given set of label values
@@ -109,6 +112,7 @@ type PrometheusCollector struct {
errors prometheus.Gauge
containerMetrics []containerMetric
containerLabelsFunc ContainerLabelsFunc
includedMetrics container.MetricSet
}

// NewPrometheusCollector returns a new PrometheusCollector. The passed
@@ -137,6 +141,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
},
},
},
includedMetrics: includedMetrics,
}
if includedMetrics.Has(container.CpuUsageMetrics) {
c.containerMetrics = append(c.containerMetrics, []containerMetric{
@@ -926,46 +931,66 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
rawLabels[l] = struct{}{}
}
}
for _, container := range containers {

psReqOpt := v2.RequestOptions{
IdType: v2.TypeName,
}

for _, cont := range containers {
values := make([]string, 0, len(rawLabels))
labels := make([]string, 0, len(rawLabels))
containerLabels := c.containerLabelsFunc(container)
containerLabels := c.containerLabelsFunc(cont)
for l := range rawLabels {
labels = append(labels, sanitizeLabelName(l))
values = append(values, containerLabels[l])
}

// Container spec
desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.CreationTime.Unix()), values...)

if container.Spec.HasCpu {
if cont.Spec.HasCpu {
desc = prometheus.NewDesc("container_spec_cpu_period", "CPU period of the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Period), values...)
if container.Spec.Cpu.Quota != 0 {
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Period), values...)
if cont.Spec.Cpu.Quota != 0 {
desc = prometheus.NewDesc("container_spec_cpu_quota", "CPU quota of the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Quota), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Quota), values...)
}
desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Limit), values...)

}
if container.Spec.HasMemory {
if cont.Spec.HasMemory {
desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.Limit), values...)
desc = prometheus.NewDesc("container_spec_memory_swap_limit_bytes", "Memory swap limit for the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.SwapLimit), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.SwapLimit), values...)
desc = prometheus.NewDesc("container_spec_memory_reservation_limit_bytes", "Memory reservation limit for the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Reservation), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.Reservation), values...)
}

if c.includedMetrics.Has(container.ProcessMetrics) {
psList, err := c.infoProvider.GetProcessList(cont.Name, psReqOpt)
if err == nil {
desc = prometheus.NewDesc("container_processes", "Number of processes running inside the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(len(psList)), values...)

var fd int
for _, ps := range psList {
fd += ps.FdCount
}
desc = prometheus.NewDesc("container_file_descriptors", "Number of open file descriptors for the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(fd), values...)
}
}

// Now for the actual metrics
if len(container.Stats) == 0 {
if len(cont.Stats) == 0 {
continue
}
stats := container.Stats[0]
stats := cont.Stats[0]
for _, cm := range c.containerMetrics {
if cm.condition != nil && !cm.condition(container.Spec) {
if cm.condition != nil && !cm.condition(cont.Spec) {
continue
}
desc := cm.desc(labels)
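
The new branch in collectContainersInfo reduces the per-process list to two gauge values: the process count is the length of the list returned by GetProcessList, and the file-descriptor count is the sum of the per-process FdCount fields. A minimal sketch of that aggregation (not part of the commit; the sample ProcessInfo values are illustrative):

package main

import (
	"fmt"

	"github.com/google/cadvisor/info/v2"
)

// processAndFdCounts mirrors the aggregation the collector performs before
// emitting container_processes and container_file_descriptors.
func processAndFdCounts(psList []v2.ProcessInfo) (processes, fds int) {
	for _, ps := range psList {
		fds += ps.FdCount
	}
	return len(psList), fds
}

func main() {
	psList := []v2.ProcessInfo{
		{Pid: 1, Cmd: "init", FdCount: 5},
		{Pid: 42, Cmd: "worker", FdCount: 3},
	}
	p, fd := processAndFdCounts(psList)
	fmt.Println(p, fd) // 2 8
}
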
29 changes: 29 additions & 0 deletions metrics/prometheus_test.go
@@ -26,6 +26,7 @@ import (

"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2"

"github.com/prometheus/client_golang/prometheus"
)
@@ -49,6 +50,26 @@ func (p testSubcontainersInfoProvider) GetMachineInfo() (*info.MachineInfo, erro
}, nil
}

func (p testSubcontainersInfoProvider) GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error) {
return []v2.ProcessInfo{
{
User: "user1",
Pid: 1,
Ppid: 2,
StartTime: "OCT2018",
PercentCpu: 0.0,
PercentMemory: 0.0,
RSS: 3,
VirtualSize: 4,
Status: "S",
RunningTime: "00:00:00",
Cmd: "cmd1",
CgroupPath: "path",
FdCount: 5,
},
}, nil
}

var allMetrics = container.MetricSet{
container.CpuUsageMetrics: struct{}{},
container.ProcessSchedulerMetrics: struct{}{},
@@ -61,6 +82,7 @@ var allMetrics = container.MetricSet{
container.NetworkUsageMetrics: struct{}{},
container.NetworkTcpUsageMetrics: struct{}{},
container.NetworkUdpUsageMetrics: struct{}{},
container.ProcessMetrics: struct{}{},
}

func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
@@ -305,6 +327,13 @@ func (p *erroringSubcontainersInfoProvider) GetMachineInfo() (*info.MachineInfo,
return p.successfulProvider.GetMachineInfo()
}

func (p *erroringSubcontainersInfoProvider) GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error) {
if p.shouldFail {
return nil, errors.New("Oops 2")
}
return p.successfulProvider.GetProcessList(containerName, options)
}

func (p *erroringSubcontainersInfoProvider) SubcontainersInfo(
a string, r *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
if p.shouldFail {
6 changes: 6 additions & 0 deletions metrics/testdata/prometheus_metrics
@@ -46,6 +46,9 @@ container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_f
# HELP container_cpu_user_seconds_total Cumulative user cpu time consumed in seconds.
# TYPE container_cpu_user_seconds_total counter
container_cpu_user_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 6e-09
# HELP container_file_descriptors Number of open file descriptors for the container.
# TYPE container_file_descriptors gauge
container_file_descriptors{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 5
# HELP container_fs_inodes_free Number of available Inodes
# TYPE container_fs_inodes_free gauge
container_fs_inodes_free{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 524288
@@ -182,6 +185,9 @@ container_network_udp_usage_total{container_env_foo_env="prod",container_label_f
container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="listen",zone_name="hello"} 0
container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="rxqueued",zone_name="hello"} 0
container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="txqueued",zone_name="hello"} 0
# HELP container_processes Number of processes running inside the container.
# TYPE container_processes gauge
container_processes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1
# HELP container_scrape_error 1 if there was an error while getting container metrics, 0 otherwise
# TYPE container_scrape_error gauge
container_scrape_error 0