feat: new input Hugepages plugin #10763

Merged 16 commits on Mar 16, 2022
feat: new input Hugepages plugin
Pawel Zak authored and Pawel Zak committed Mar 6, 2022
commit 2d936854f53ef5258fe2cf160ff8e9f143acf3ed
2 changes: 1 addition & 1 deletion plugins/inputs/hugepages/README.md
@@ -15,7 +15,7 @@ Consult <https://www.kernel.org/doc/html/latest/admin-guide/mm/hugetlbpage.html>
## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages
## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages
## - "meminfo" - based on /proc/meminfo file
# hugepages_types = ["root", "per_node"]
# types = ["root", "per_node"]
```

## Measurements
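The README hunk above renames the plugin's only option from `hugepages_types` to `types`. For illustration, here is a minimal sketch of how the renamed key maps onto the plugin struct through its TOML tag, using the BurntSushi/toml decoder purely as a stand-in (an assumption; Telegraf loads plugin configuration through its own parser):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// hugepagesConfig mirrors the renamed struct field from the diff below:
// the TOML key is now "types" instead of "hugepages_types".
type hugepagesConfig struct {
	Types []string `toml:"types"`
}

func main() {
	// Body of a [[inputs.hugepages]] table as it might appear in telegraf.conf.
	raw := `types = ["root", "per_node"]`

	var cfg hugepagesConfig
	if _, err := toml.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Types) // prints: [root per_node]
}
```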
84 changes: 44 additions & 40 deletions plugins/inputs/hugepages/hugepages.go
@@ -27,6 +27,14 @@ const (
rootHugepages = "root"
perNodeHugepages = "per_node"
meminfoHugepages = "meminfo"

hugepagesSampleConfig = `
## Supported huge page types:
## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages
## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages
## - "meminfo" - based on /proc/meminfo file
# types = ["root", "per_node"]
`
)

var (
@@ -62,16 +70,8 @@ var (
}
)

var hugepagesSampleConfig = `
## Supported huge page types:
## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages
## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages
## - "meminfo" - based on /proc/meminfo file
# hugepages_types = ["root", "per_node"]
`

type Hugepages struct {
HugepagesTypes []string `toml:"hugepages_types"`
Types []string `toml:"types"`

gatherRoot bool
gatherPerNode bool
@@ -109,21 +109,21 @@ func (h *Hugepages) Gather(acc telegraf.Accumulator) error {
if h.gatherRoot {
err = h.gatherRootStats(acc)
if err != nil {
return err
return fmt.Errorf("gathering root stats failed: %v", err)
}
}

if h.gatherPerNode {
err = h.gatherStatsPerNode(acc)
if err != nil {
return err
return fmt.Errorf("gathering per node stats failed: %v", err)
}
}

if h.gatherMeminfo {
err = h.gatherStatsFromMeminfo(acc)
if err != nil {
return err
return fmt.Errorf("gathering meminfo stats failed: %v", err)
}
}

@@ -132,7 +132,7 @@ func (h *Hugepages) Gather(acc telegraf.Accumulator) error {

// gatherRootStats collects root hugepages statistics
func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error {
return h.gatherFromHugepagePath(h.rootHugepagePath, hugepagesMetricsRoot, "hugepages_"+rootHugepages, nil, acc)
return h.gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil)
}

// gatherStatsPerNode collects hugepages statistics per NUMA node
@@ -158,18 +158,17 @@ func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error {
"node": nodeNumber,
}
hugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), "hugepages")
err = h.gatherFromHugepagePath(hugepagesPath, hugepagesMetricsPerNUMANode, "hugepages_"+perNodeHugepages, perNodeTags, acc)
err = h.gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags)
if err != nil {
return err
}
}
return nil
}

func (h *Hugepages) gatherFromHugepagePath(hugepagesPath string, possibleMetrics []string, measurementName string,
tagsToUse map[string]string, acc telegraf.Accumulator) error {
func (h *Hugepages) gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter []string, defaultTags map[string]string) error {
// read metrics from: hugepages/hugepages-*/*
hugepagesDirs, err := ioutil.ReadDir(hugepagesPath)
hugepagesDirs, err := ioutil.ReadDir(path)
if err != nil {
return err
}
@@ -185,40 +184,43 @@ func (h *Hugepages) gatherFromHugepagePath(hugepagesPath string, possibleMetrics
continue
}

metricsPath := filepath.Join(hugepagesPath, hugepagesDir.Name())
metricsPath := filepath.Join(path, hugepagesDir.Name())
metricFiles, err := ioutil.ReadDir(metricsPath)
if err != nil {
return err
}

metrics := make(map[string]interface{})
for _, metricFile := range metricFiles {
if mode := metricFile.Mode(); !mode.IsRegular() || !choice.Contains(metricFile.Name(), possibleMetrics) {
if mode := metricFile.Mode(); !mode.IsRegular() || !choice.Contains(metricFile.Name(), fileFilter) {
continue
}

metricBytes, err := ioutil.ReadFile(filepath.Join(metricsPath, metricFile.Name()))
metricFullPath := filepath.Join(metricsPath, metricFile.Name())
metricBytes, err := ioutil.ReadFile(metricFullPath)
if err != nil {
return err
}

metricValue, err := strconv.Atoi(string(bytes.TrimSuffix(metricBytes, newlineByte)))
if err != nil {
return err
return fmt.Errorf("failed to convert content of '%s': %v", metricFullPath, err)
}

metrics[metricFile.Name()] = metricValue
}

if len(metrics) > 0 {
tags := make(map[string]string)
for key, value := range tagsToUse {
tags[key] = value
}
tags["hugepages_size_kb"] = hugepagesSize
if len(metrics) == 0 {
continue
}

acc.AddFields(measurementName, metrics, tags)
tags := make(map[string]string)
for key, value := range defaultTags {
tags[key] = value
}
tags["hugepages_size_kb"] = hugepagesSize

acc.AddFields(measurement, metrics, tags)
}
return nil
}
@@ -238,17 +240,19 @@ func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {
continue
}
fieldName := string(bytes.TrimSuffix(fields[0], colonByte))
if choice.Contains(fieldName, hugepagesMetricsFromMeminfo) {
fieldValue, err := strconv.Atoi(string(fields[1]))
if err != nil {
return err
}
if !choice.Contains(fieldName, hugepagesMetricsFromMeminfo) {
continue
}

if bytes.Contains(line, kbPrecisionByte) {
fieldName = fieldName + "_kb"
}
metrics[fieldName] = fieldValue
fieldValue, err := strconv.Atoi(string(fields[1]))
if err != nil {
return fmt.Errorf("failed to convert content of '%s': %v", fieldName, err)
}

if bytes.Contains(line, kbPrecisionByte) {
fieldName = fieldName + "_kb"
}
metrics[fieldName] = fieldValue
}

acc.AddFields("hugepages_"+meminfoHugepages, metrics, map[string]string{})
@@ -257,18 +261,18 @@

func (h *Hugepages) parseHugepagesConfig() error {
// default
if h.HugepagesTypes == nil {
if h.Types == nil {
h.gatherRoot = true
h.gatherMeminfo = true
return nil
}

// empty array
if len(h.HugepagesTypes) == 0 {
if len(h.Types) == 0 {
return fmt.Errorf("plugin was configured with nothing to read")
}

for _, hugepagesType := range h.HugepagesTypes {
for _, hugepagesType := range h.Types {
switch hugepagesType {
case rootHugepages:
h.gatherRoot = true
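The hunk above cuts off inside parseHugepagesConfig's switch over the configured types. A sketch of how the method presumably continues, reconstructed from the constants and gather flags visible earlier in the diff; the error message for an unsupported type is an assumption (the tests only show that Init fails when "linux_hdd" is configured):

```go
package hugepages

import "fmt"

const (
	rootHugepages    = "root"
	perNodeHugepages = "per_node"
	meminfoHugepages = "meminfo"
)

type Hugepages struct {
	Types []string `toml:"types"`

	gatherRoot    bool
	gatherPerNode bool
	gatherMeminfo bool
}

// parseHugepagesConfig maps the configured type names onto the gather flags.
func (h *Hugepages) parseHugepagesConfig() error {
	// default: gather root and meminfo stats when no types are configured
	if h.Types == nil {
		h.gatherRoot = true
		h.gatherMeminfo = true
		return nil
	}

	// empty array
	if len(h.Types) == 0 {
		return fmt.Errorf("plugin was configured with nothing to read")
	}

	for _, hugepagesType := range h.Types {
		switch hugepagesType {
		case rootHugepages:
			h.gatherRoot = true
		case perNodeHugepages:
			h.gatherPerNode = true
		case meminfoHugepages:
			h.gatherMeminfo = true
		default:
			// exact wording is an assumption; the real message is not shown in the hunk
			return fmt.Errorf("provided hugepages type %q is not valid", hugepagesType)
		}
	}
	return nil
}
```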
95 changes: 58 additions & 37 deletions plugins/inputs/hugepages/hugepages_test.go
@@ -26,15 +26,15 @@ func TestInit(t *testing.T) {
})

t.Run("when empty hugepages types is provided then plugin should fail to initialize", func(t *testing.T) {
h := Hugepages{HugepagesTypes: []string{}}
h := Hugepages{Types: []string{}}
err := h.Init()

require.Error(t, err)
require.Contains(t, err.Error(), "plugin was configured with nothing to read")
})

t.Run("when valid hugepages types is provided then proper flags should be set", func(t *testing.T) {
h := Hugepages{HugepagesTypes: []string{"root", "per_node", "meminfo"}}
h := Hugepages{Types: []string{"root", "per_node", "meminfo"}}
err := h.Init()

require.NoError(t, err)
@@ -44,7 +44,7 @@ func TestInit(t *testing.T) {
})

t.Run("when hugepages types contains not supported value then plugin should fail to initialize", func(t *testing.T) {
h := Hugepages{HugepagesTypes: []string{"root", "per_node", "linux_hdd", "meminfo"}}
h := Hugepages{Types: []string{"root", "per_node", "linux_hdd", "meminfo"}}
err := h.Init()

require.Error(t, err)
@@ -62,19 +62,25 @@ func TestGather(t *testing.T) {
acc := &testutil.Accumulator{}
require.NoError(t, h.Gather(acc))

require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "2048"}, "free_hugepages", 883))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "2048"}, "resv_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "2048"}, "surplus_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "2048"}, "nr_hugepages_mempolicy", 2048))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "2048"}, "nr_hugepages", 2048))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "2048"}, "nr_overcommit_hugepages", 0))

require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "1048576"}, "free_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "1048576"}, "resv_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "1048576"}, "surplus_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "1048576"}, "nr_hugepages_mempolicy", 8))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "1048576"}, "nr_hugepages", 8))
require.True(t, acc.HasPoint("hugepages_root", map[string]string{"hugepages_size_kb": "1048576"}, "nr_overcommit_hugepages", 0))
expectedFields := map[string]interface{}{
"free_hugepages": 883,
"resv_hugepages": 0,
"surplus_hugepages": 0,
"nr_hugepages_mempolicy": 2048,
"nr_hugepages": 2048,
"nr_overcommit_hugepages": 0,
}
acc.AssertContainsTaggedFields(t, "hugepages_root", expectedFields, map[string]string{"hugepages_size_kb": "2048"})

expectedFields = map[string]interface{}{
"free_hugepages": 0,
"resv_hugepages": 0,
"surplus_hugepages": 0,
"nr_hugepages_mempolicy": 8,
"nr_hugepages": 8,
"nr_overcommit_hugepages": 0,
}
acc.AssertContainsTaggedFields(t, "hugepages_root", expectedFields, map[string]string{"hugepages_size_kb": "1048576"})
})

t.Run("when per node hugepages type is enabled then gather all per node metrics successfully", func(t *testing.T) {
@@ -86,21 +92,33 @@ func TestGather(t *testing.T) {
acc := &testutil.Accumulator{}
require.NoError(t, h.Gather(acc))

require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "2048", "node": "0"}, "free_hugepages", 434))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "2048", "node": "0"}, "surplus_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "2048", "node": "0"}, "nr_hugepages", 1024))
expectedFields := map[string]interface{}{
"free_hugepages": 434,
"surplus_hugepages": 0,
"nr_hugepages": 1024,
}
acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"hugepages_size_kb": "2048", "node": "0"})

require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "2048", "node": "1"}, "free_hugepages", 449))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "2048", "node": "1"}, "surplus_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "2048", "node": "1"}, "nr_hugepages", 1024))
expectedFields = map[string]interface{}{
"free_hugepages": 449,
"surplus_hugepages": 0,
"nr_hugepages": 1024,
}
acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"hugepages_size_kb": "2048", "node": "1"})

require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "1048576", "node": "0"}, "free_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "1048576", "node": "0"}, "surplus_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "1048576", "node": "0"}, "nr_hugepages", 4))
expectedFields = map[string]interface{}{
"free_hugepages": 0,
"surplus_hugepages": 0,
"nr_hugepages": 4,
}
acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"hugepages_size_kb": "1048576", "node": "0"})

require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "1048576", "node": "1"}, "free_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "1048576", "node": "1"}, "surplus_hugepages", 0))
require.True(t, acc.HasPoint("hugepages_per_node", map[string]string{"hugepages_size_kb": "1048576", "node": "1"}, "nr_hugepages", 4))
expectedFields = map[string]interface{}{
"free_hugepages": 0,
"surplus_hugepages": 0,
"nr_hugepages": 4,
}
acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"hugepages_size_kb": "1048576", "node": "1"})
})

t.Run("when meminfo hugepages type is enabled then gather all meminfo metrics successfully", func(t *testing.T) {
@@ -112,15 +130,18 @@ func TestGather(t *testing.T) {
acc := &testutil.Accumulator{}
require.NoError(t, h.Gather(acc))

require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "AnonHugePages_kb", 0))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "ShmemHugePages_kb", 0))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "FileHugePages_kb", 0))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "HugePages_Total", 2048))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "HugePages_Free", 883))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "HugePages_Rsvd", 0))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "HugePages_Surp", 0))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "Hugepagesize_kb", 2048))
require.True(t, acc.HasPoint("hugepages_meminfo", map[string]string{}, "Hugetlb_kb", 12582912))
expectedFields := map[string]interface{}{
"AnonHugePages_kb": 0,
"ShmemHugePages_kb": 0,
"FileHugePages_kb": 0,
"HugePages_Total": 2048,
"HugePages_Free": 883,
"HugePages_Rsvd": 0,
"HugePages_Surp": 0,
"Hugepagesize_kb": 2048,
"Hugetlb_kb": 12582912,
}
acc.AssertContainsFields(t, "hugepages_meminfo", expectedFields)
})

t.Run("when root hugepages type is enabled but path is invalid then return error", func(t *testing.T) {
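The test refactor above replaces per-field `HasPoint` checks with whole-map assertions against the accumulator. A minimal, hypothetical example of that assertion style using Telegraf's `testutil.Accumulator` (not part of this PR):

```go
package hugepages

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

// TestAssertionStyleSketch shows one expected-field map asserted per tag set,
// instead of a separate HasPoint call for every individual field.
func TestAssertionStyleSketch(t *testing.T) {
	acc := &testutil.Accumulator{}

	// Simulate what the plugin would emit for the 2048 kB size directory.
	acc.AddFields(
		"hugepages_root",
		map[string]interface{}{"free_hugepages": 883, "nr_hugepages": 2048},
		map[string]string{"hugepages_size_kb": "2048"},
	)

	expectedFields := map[string]interface{}{
		"free_hugepages": 883,
		"nr_hugepages":   2048,
	}
	acc.AssertContainsTaggedFields(t, "hugepages_root", expectedFields,
		map[string]string{"hugepages_size_kb": "2048"})
}
```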