chore: upgrade golangci-lint to v1.49
Mikołaj Świątek committed Sep 1, 2022
1 parent 0f8fcdd commit 307b0be
Showing 11 changed files with 58 additions and 115 deletions.
9 changes: 2 additions & 7 deletions .golangci.yaml
@@ -28,21 +28,16 @@ linters:
disable-all: true
enable:
- gofmt
- deadcode
- errcheck
- goimports
- misspell
- noctx
- lll
- govet
- ineffassign
- structcheck
- typecheck
- varcheck
# These linters are problematic when using go1.18 and generic code like in golang.org/x/exp/slices
# - unused
# - gosimple
# - staticcheck
- unused
- gosimple

issues:
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
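Note: golangci-lint v1.49 deprecates the deadcode, structcheck, and varcheck linters, whose checks are subsumed by unused, so they are dropped here; unused and gosimple can be re-enabled because the underlying staticcheck tooling now copes with go1.18 generics, which was the problem the deleted comment described (staticcheck itself remains disabled).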
2 changes: 1 addition & 1 deletion Makefile
@@ -1,4 +1,4 @@
GOLANGCI_LINT_VERSION ?= v1.45.2
GOLANGCI_LINT_VERSION ?= v1.49
SHELL := /usr/bin/env bash

all: markdownlint yamllint
5 changes: 2 additions & 3 deletions pkg/exporter/sumologicexporter/compress.go
@@ -18,7 +18,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"

"github.com/klauspost/compress/flate"
"github.com/klauspost/compress/gzip"
@@ -44,9 +43,9 @@ func newCompressor(format CompressEncodingType) (compressor, error) {

switch format {
case GZIPCompression:
writer = gzip.NewWriter(ioutil.Discard)
writer = gzip.NewWriter(io.Discard)
case DeflateCompression:
writer, err = flate.NewWriter(ioutil.Discard, flate.BestSpeed)
writer, err = flate.NewWriter(io.Discard, flate.BestSpeed)
if err != nil {
return compressor{}, err
}
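The only substantive change in this file (and in the test file below) is the Go 1.16 migration off the deprecated io/ioutil package: ioutil.Discard becomes io.Discard and ioutil.ReadAll becomes io.ReadAll, both drop-in replacements. A minimal sketch of the pattern, using the stdlib compress/gzip instead of this repo's klauspost/compress import:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// io.Discard (Go 1.16+) replaces ioutil.Discard: a Writer that drops
	// everything, handy for pre-allocating a writer before Reset.
	w := gzip.NewWriter(io.Discard)

	var buf bytes.Buffer
	w.Reset(&buf)
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// io.ReadAll replaces ioutil.ReadAll with identical semantics.
	r, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello
}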
9 changes: 4 additions & 5 deletions pkg/exporter/sumologicexporter/compress_test.go
@@ -19,7 +19,6 @@ import (
"compress/gzip"
"errors"
"io"
"io/ioutil"
"strings"
"testing"

@@ -99,7 +98,7 @@ func decodeGzip(t *testing.T, data io.Reader) string {
require.NoError(t, err)

var buf []byte
buf, err = ioutil.ReadAll(r)
buf, err = io.ReadAll(r)
require.NoError(t, err)

return string(buf)
@@ -123,7 +122,7 @@ func decodeDeflate(t *testing.T, data io.Reader) string {
r := flate.NewReader(data)

var buf []byte
buf, err := ioutil.ReadAll(r)
buf, err := io.ReadAll(r)
require.NoError(t, err)

return string(buf)
@@ -167,7 +166,7 @@ func BenchmarkCompression(b *testing.B) {
}

var buf []byte
buf, err = ioutil.ReadAll(r)
buf, err = io.ReadAll(r)
if err != nil {
return "", err
}
@@ -176,7 +175,7 @@
case string(DeflateCompression):
r := flate.NewReader(data)

buf, err := ioutil.ReadAll(r)
buf, err := io.ReadAll(r)
if err != nil {
return "", err
}
13 changes: 0 additions & 13 deletions pkg/exporter/sumologicexporter/fields.go
@@ -17,7 +17,6 @@ package sumologicexporter
import (
"strings"

"github.com/google/go-cmp/cmp"
"go.opentelemetry.io/collector/pdata/pcommon"
"golang.org/x/exp/slices"
)
@@ -39,18 +38,6 @@ func (f fields) isInitialized() bool {
return f.initialized
}

func (f fields) isEmpty() bool {
return f.orig.Len() == 0
}

func (f fields) equals(other fields) bool {
if f.orig.Len() != other.orig.Len() {
return false
}

return cmp.Equal(f.orig.AsRaw(), other.orig.AsRaw())
}

// string returns fields as ordered key=value string with `, ` as separator
func (f fields) string() string {
if !f.initialized {
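The deleted isEmpty and equals helpers had no callers (presumably flagged once the unused linter was re-enabled in .golangci.yaml above), which also makes the go-cmp import unnecessary. The surviving string method renders fields as an ordered key=value list; a hypothetical sketch of that shape, using a plain Go map instead of the real pcommon.Map:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// renderFields is a hypothetical stand-in for fields.string():
// it emits key=value pairs in deterministic order, joined by ", ".
func renderFields(m map[string]string) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, k+"="+m[k])
	}
	return strings.Join(pairs, ", ")
}

func main() {
	fmt.Println(renderFields(map[string]string{"namespace": "sumologic", "cluster": "dev"}))
	// Output: cluster=dev, namespace=sumologic
}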
35 changes: 0 additions & 35 deletions pkg/exporter/sumologicexporter/sender.go
@@ -92,12 +92,6 @@ func (b *bodyBuilder) Reset() {
b.builder.Reset()
}

// addLine adds line to builder and increments counter
func (b *bodyBuilder) addLine(line string) {
b.builder.WriteString(line) // WriteString can't actually return an error
b.counter += 1
}

// addLine adds multiple lines to builder and increments counter
func (b *bodyBuilder) addLines(lines []string) {
if len(lines) == 0 {
@@ -131,7 +125,6 @@

type sender struct {
logger *zap.Logger
metricBuffer []metricPair
config *Config
client *http.Client
sources sourceFormats
@@ -682,16 +675,6 @@ func (s *sender) sendOTLPTraces(ctx context.Context, td ptrace.Traces) error {
return nil
}

// cleanMetricBuffer zeroes metricBuffer
func (s *sender) cleanMetricBuffer() {
s.metricBuffer = (s.metricBuffer)[:0]
}

// countMetrics returns number of metrics in metricBuffer
func (s *sender) countMetrics() int {
return len(s.metricBuffer)
}

func addCompressHeader(req *http.Request, enc CompressEncodingType) error {
switch enc {
case GZIPCompression:
@@ -794,24 +777,6 @@ func (s *sender) addRequestHeaders(req *http.Request, pipeline PipelineType, fld
return nil
}

// addSourceResourceAttributes adds source related attributes:
// * source category
// * source host
// * source name
// to the provided attribute map using the provided fields as values source and using
// the source templates for formatting.
func (s *sender) addSourceRelatedResourceAttributesFromFields(attrs pcommon.Map, flds fields) {
if s.sources.host.isSet() {
attrs.InsertString(attributeKeySourceHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
attrs.InsertString(attributeKeySourceName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
attrs.InsertString(attributeKeySourceCategory, s.sources.category.format(flds))
}
}

// addSourceResourceAttributes adds source related attributes:
// * source category
// * source host
8 changes: 4 additions & 4 deletions pkg/exporter/sumologicexporter/source_format.go
@@ -38,10 +38,10 @@ const unrecognizedAttributeValue = "undefined"

// newSourceFormat builds sourceFormat basing on the regex and given text.
// Regex is basing on the `sourceRegex` const
// For given example text: `%{cluster}/%{namespace}``, it sets:
// - template to `%s/%s`, which can be used later by fmt.Sprintf
// - matches as map of (attribute) keys ({"cluster", "namespace"}) which will
// be used to put corresponding value into templates' `%s
// For given example text: `%{cluster}/%{namespace}, it sets:
// - template to `%s/%s`, which can be used later by fmt.Sprintf
// - matches as map of (attribute) keys ({"cluster", "namespace"}) which will
// be used to put corresponding value into templates' `%s
func newSourceFormat(r *regexp.Regexp, text string) sourceFormat {
matches := r.FindAllStringSubmatch(text, -1)
template := r.ReplaceAllString(text, "%s")
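The comment reflowed above describes how newSourceFormat turns a template such as `%{cluster}/%{namespace}` into a fmt template plus the list of attribute keys. A hypothetical sketch of that mechanism (the package's actual sourceRegex const is defined outside this hunk):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical stand-in for the sourceRegex const: match %{key}
	// placeholders and capture the attribute name.
	sourceRegex := regexp.MustCompile(`%\{(\w+)\}`)

	text := "%{cluster}/%{namespace}"
	template := sourceRegex.ReplaceAllString(text, "%s")

	var matches []string
	for _, m := range sourceRegex.FindAllStringSubmatch(text, -1) {
		matches = append(matches, m[1]) // m[1] is the captured key
	}

	fmt.Println(template) // %s/%s
	fmt.Println(matches)  // [cluster namespace]
}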
6 changes: 3 additions & 3 deletions pkg/extension/sumologicextension/extension.go
@@ -198,9 +198,9 @@ func (se *SumologicExtension) validateCredentials(
}

// injectCredentials injects the collector credentials:
// * into registration info that's stored in the extension and can be used by roundTripper
// * into http client and its transport so that each request is using collector
// credentials as authentication keys
// - into registration info that's stored in the extension and can be used by roundTripper
// - into http client and its transport so that each request is using collector
// credentials as authentication keys
func (se *SumologicExtension) injectCredentials(colCreds credentials.CollectorCredentials) error {
// Set the registration info so that it can be used in RoundTripper.
se.registrationInfo = colCreds.Credentials
@@ -4,14 +4,13 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package bigendianconverter

import (
@@ -4,14 +4,13 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package bigendianconverter

import (
80 changes: 40 additions & 40 deletions pkg/processor/k8sprocessor/doc.go
@@ -22,42 +22,44 @@
// It represents a list of rules that are executed in the specified order until the first one is able to do the match.
// Each rule is specified as a pair of from (representing the rule type) and name (representing the extracted key name).
// Following rule types are available:
// from: "resource_attribute" - allows to specify the attribute name to lookup up in the list of attributes of the received Resource.
// The specified attribute, if it is present, identifies the Pod that is represented by the Resource.
// (the value can contain either IP address, Pod UID or be in `pod_name.namespace_name` format).
// For `pod_name.namespace_name` format, always attributes for actual matching pod will be added.
// from: "connection" - takes the IP attribute from connection context (if available) and automatically
// associates it with "k8s.pod.ip" attribute
// from: "build_hostname" - build hostname from k8s.pod.name concatenated with k8s.namespace.name using dot as separator
// and proceed as for `pod_name.namespace_name` format for `resource_attributes` pod_association.
//
// from: "resource_attribute" - allows to specify the attribute name to lookup up in the list of attributes of the received Resource.
// The specified attribute, if it is present, identifies the Pod that is represented by the Resource.
// (the value can contain either IP address, Pod UID or be in `pod_name.namespace_name` format).
// For `pod_name.namespace_name` format, always attributes for actual matching pod will be added.
// from: "connection" - takes the IP attribute from connection context (if available) and automatically
// associates it with "k8s.pod.ip" attribute
// from: "build_hostname" - build hostname from k8s.pod.name concatenated with k8s.namespace.name using dot as separator
// and proceed as for `pod_name.namespace_name` format for `resource_attributes` pod_association.
//
// Pod association configuration.
// pod_association:
// - from: resource_attribute
// name: ip
// - from: resource_attribute
// name: k8s.pod.ip
// - from: resource_attribute
// name: host.name
// - from: connection
// name: ip
// - from: resource_attribute
// name: k8s.pod.uid
// - from: resource_attribute
// name: ip
// - from: resource_attribute
// name: k8s.pod.ip
// - from: resource_attribute
// name: host.name
// - from: connection
// name: ip
// - from: resource_attribute
// name: k8s.pod.uid
//
// If Pod association rules are not configured resources are associated with metadata only by connection's IP Address.
//
// RBAC
// # RBAC
//
// TODO: mention the required RBAC rules.
//
// Config
// # Config
//
// TODO: example config.
//
// Deployment scenarios
// # Deployment scenarios
//
// The processor supports running both in agent and collector mode.
//
// As an agent
// # As an agent
//
// When running as an agent, the processor detects IP addresses of pods sending spans, metrics or logs to the agent
// and uses this information to extract metadata from pods. When running as an agent, it is important to apply
@@ -74,26 +76,26 @@
// 1. Use the downward API to inject the node name as an environment variable.
// Add the following snippet under the pod env section of the OpenTelemetry container.
//
// env:
// - name: KUBE_NODE_NAME
// valueFrom:
// fieldRef:
// apiVersion: v1
// fieldPath: spec.nodeName
// env:
// - name: KUBE_NODE_NAME
// valueFrom:
// fieldRef:
// apiVersion: v1
// fieldPath: spec.nodeName
//
// This will inject a new environment variable to the OpenTelemetry container with the value as the
// name of the node the pod was scheduled to run on.
//
// 2. Set "filter.node_from_env_var" to the name of the environment variable holding the node name.
//
// k8s_tagger:
// filter:
// node_from_env_var: KUBE_NODE_NAME # this should be same as the var name used in previous step
// k8s_tagger:
// filter:
// node_from_env_var: KUBE_NODE_NAME # this should be same as the var name used in previous step
//
// This will restrict each OpenTelemetry agent to query pods running on the same node only dramatically reducing
// resource requirements for very large clusters.
//
// As a collector
// # As a collector
//
// The processor can be deployed both as an agent or as a collector.
//
@@ -108,9 +110,9 @@
// 1. Setup agents in passthrough mode
// Configure the agents' k8s_tagger processors to run in passthrough mode.
//
// # k8s_tagger config for agent
// k8s_tagger:
// passthrough: true
// # k8s_tagger config for agent
// k8s_tagger:
// passthrough: true
//
// This will ensure that the agents detect the IP address as add it as an attribute to all telemetry resources.
// Agents will not make any k8s API calls, do any discovery of pods or extract any metadata.
@@ -119,19 +121,17 @@
// No special configuration changes are needed to be made on the collector. It'll automatically detect
// the IP address of spans, logs and metrics sent by the agents as well as directly by other services/pods.
//
//
// Caveats
// # Caveats
//
// There are some edge-cases and scenarios where k8s_tagger will not work properly.
//
//
// Host networking mode
// # Host networking mode
//
// The processor cannot correct identify pods running in the host network mode and
// enriching telemetry data generated by such pods is not supported at the moment, unless the attributes contain
// information about the source IP.
//
// As a sidecar
// # As a sidecar
//
// The processor does not support detecting containers from the same pods when running
// as a sidecar. While this can be done, we think it is simpler to just use the kubernetes
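Most of the churn in doc.go, and apparently the license-header tweaks above as well, comes from Go 1.19's gofmt, which normalizes doc comments: a line starting with # becomes a heading, list items are indented under -, and tab-indented lines render as preformatted blocks. A minimal sketch of the new syntax on a hypothetical package:

// Package example demonstrates the Go 1.19 doc comment conventions
// that gofmt enforces, matching the reflow seen in doc.go above.
//
// # A Heading
//
// A list:
//   - first item
//   - second item
//
// A tab-indented line renders as a preformatted block:
//
//	k8s_tagger:
//	  passthrough: true
package example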
